applied-ai-018 committed on
Commit 068e5e3 · verified · 1 Parent(s): d94d830

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. lm-evaluation/UNKNOWN.egg-info/PKG-INFO +11 -0
  2. lm-evaluation/UNKNOWN.egg-info/SOURCES.txt +8 -0
  3. lm-evaluation/UNKNOWN.egg-info/dependency_links.txt +1 -0
  4. lm-evaluation/UNKNOWN.egg-info/top_level.txt +1 -0
  5. lm-evaluation/examples/lm-eval-overview.ipynb +1231 -0
  6. lm-evaluation/examples/visualize-wandb.ipynb +168 -0
  7. lm-evaluation/examples/visualize-zeno.ipynb +115 -0
  8. lm-evaluation/lm_eval/__pycache__/__init__.cpython-310.pyc +0 -0
  9. lm-evaluation/lm_eval/__pycache__/__main__.cpython-310.pyc +0 -0
  10. lm-evaluation/lm_eval/__pycache__/evaluator.cpython-310.pyc +0 -0
  11. lm-evaluation/lm_eval/__pycache__/evaluator_utils.cpython-310.pyc +0 -0
  12. lm-evaluation/lm_eval/__pycache__/logging_utils.cpython-310.pyc +0 -0
  13. lm-evaluation/lm_eval/__pycache__/utils.cpython-310.pyc +0 -0
  14. lm-evaluation/lm_eval/caching/__pycache__/cache.cpython-310.pyc +0 -0
  15. lm-evaluation/lm_eval/models/__init__.py +26 -0
  16. lm-evaluation/lm_eval/models/__pycache__/anthropic_llms.cpython-310.pyc +0 -0
  17. lm-evaluation/lm_eval/models/__pycache__/dummy.cpython-310.pyc +0 -0
  18. lm-evaluation/lm_eval/models/__pycache__/gguf.cpython-310.pyc +0 -0
  19. lm-evaluation/lm_eval/models/__pycache__/huggingface.cpython-310.pyc +0 -0
  20. lm-evaluation/lm_eval/models/__pycache__/mamba_lm.cpython-310.pyc +0 -0
  21. lm-evaluation/lm_eval/models/__pycache__/nemo_lm.cpython-310.pyc +0 -0
  22. lm-evaluation/lm_eval/models/__pycache__/neuron_optimum.cpython-310.pyc +0 -0
  23. lm-evaluation/lm_eval/models/__pycache__/openai_completions.cpython-310.pyc +0 -0
  24. lm-evaluation/lm_eval/models/__pycache__/optimum_lm.cpython-310.pyc +0 -0
  25. lm-evaluation/lm_eval/models/__pycache__/textsynth.cpython-310.pyc +0 -0
  26. lm-evaluation/lm_eval/models/__pycache__/utils.cpython-310.pyc +0 -0
  27. lm-evaluation/lm_eval/models/__pycache__/vllm_causallms.cpython-310.pyc +0 -0
  28. lm-evaluation/lm_eval/models/anthropic_llms.py +360 -0
  29. lm-evaluation/lm_eval/models/dummy.py +41 -0
  30. lm-evaluation/lm_eval/models/gguf.py +130 -0
  31. lm-evaluation/lm_eval/models/huggingface.py +1243 -0
  32. lm-evaluation/lm_eval/models/mamba_lm.py +126 -0
  33. lm-evaluation/lm_eval/models/nemo_lm.py +537 -0
  34. lm-evaluation/lm_eval/models/neuron_optimum.py +736 -0
  35. lm-evaluation/lm_eval/models/openai_completions.py +481 -0
  36. lm-evaluation/lm_eval/models/optimum_lm.py +69 -0
  37. lm-evaluation/lm_eval/models/textsynth.py +171 -0
  38. lm-evaluation/lm_eval/models/utils.py +615 -0
  39. lm-evaluation/lm_eval/models/vllm_causallms.py +487 -0
  40. lm-evaluation/lm_eval/prompts/__init__.py +126 -0
  41. lm-evaluation/lm_eval/prompts/__pycache__/__init__.cpython-310.pyc +0 -0
  42. lm-evaluation/tests/__init__.py +0 -0
  43. lm-evaluation/tests/models/test_gguf.py +152 -0
  44. lm-evaluation/tests/models/test_huggingface.py +143 -0
  45. lm-evaluation/tests/models/test_neuron_optimum.py +26 -0
  46. lm-evaluation/tests/models/test_openvino.py +73 -0
  47. lm-evaluation/tests/models/test_vllm.py +51 -0
  48. lm-evaluation/tests/test_cli.py +43 -0
  49. lm-evaluation/tests/test_evaluator.py +65 -0
  50. lm-evaluation/tests/test_janitor.py +507 -0
lm-evaluation/UNKNOWN.egg-info/PKG-INFO ADDED
@@ -0,0 +1,11 @@
+ Metadata-Version: 2.1
+ Name: UNKNOWN
+ Version: 0.0.0
+ Summary: UNKNOWN
+ Home-page: UNKNOWN
+ License: UNKNOWN
+ Platform: UNKNOWN
+ License-File: LICENSE.md
+
+ UNKNOWN
+
lm-evaluation/UNKNOWN.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,8 @@
+ LICENSE.md
+ README.md
+ pyproject.toml
+ setup.py
+ UNKNOWN.egg-info/PKG-INFO
+ UNKNOWN.egg-info/SOURCES.txt
+ UNKNOWN.egg-info/dependency_links.txt
+ UNKNOWN.egg-info/top_level.txt
lm-evaluation/UNKNOWN.egg-info/dependency_links.txt ADDED
@@ -0,0 +1 @@
+
lm-evaluation/UNKNOWN.egg-info/top_level.txt ADDED
@@ -0,0 +1 @@
+
lm-evaluation/examples/lm-eval-overview.ipynb ADDED
@@ -0,0 +1,1231 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {
6
+ "id": "Qw83KAePAhaS"
7
+ },
8
+ "source": [
9
+ "# Releasing LM-Evaluation-Harness v0.4.0"
10
+ ]
11
+ },
12
+ {
13
+ "cell_type": "markdown",
14
+ "metadata": {
15
+ "id": "Z7k2vq1iAdqr"
16
+ },
17
+ "source": [
18
+ "With the vast amount of work done in the field today, it helps to have a tool that people can use easily to share their results and use to check others to ensure reported numbers are valid. The LM Evaluation Harness is one such tool the community has used extensively. We want to continue to support the community and with that in mind, we’re excited to announce a major update on the LM Evaluation Harness to further our goal for open and accessible AI research."
19
+ ]
20
+ },
21
+ {
22
+ "cell_type": "markdown",
23
+ "metadata": {
24
+ "id": "0gDoM0AJAvEc"
25
+ },
26
+ "source": [
27
+ "Our refactor stems from our desires to make the following believed best practices easier to carry out. \n",
28
+ "\n",
29
+ "1. Never copy results from other papers\n",
30
+ "2. Always share your exact prompts\n",
31
+ "3. Always provide model outputs\n",
32
+ "4. Qualitatively review a small batch of outputs before running evaluation jobs at scale\n",
33
+ "\n",
34
+ "We also wanted to make the library a better experience to use and to contribute or design evaluations within. New features in the new release that serve this purpose include:\n",
35
+ "\n",
36
+ "1. Faster Evaluation Runtimes (accelerated data-parallel inference with HF Transformers + Accelerate, and commonly used or faster inference libraries such as vLLM and Llama-CPP)\n",
37
+ "2. Easier addition and sharing of new tasks (YAML-based task config formats, allowing single-file sharing of custom tasks)\n",
38
+ "3. More configurability, for more advanced workflows and easier operation with modifying prompts\n",
39
+ "4. Better logging of data at runtime and post-hoc"
40
+ ]
41
+ },
42
+ {
43
+ "cell_type": "markdown",
44
+ "metadata": {
45
+ "id": "nnwsOpjda_YW"
46
+ },
47
+ "source": [
48
+ "In this notebook we will be going through a short tutorial on how things work."
49
+ ]
50
+ },
51
+ {
52
+ "cell_type": "markdown",
53
+ "metadata": {
54
+ "id": "zAov81vTbL2K"
55
+ },
56
+ "source": [
57
+ "## Install LM-Eval"
58
+ ]
59
+ },
60
+ {
61
+ "cell_type": "code",
62
+ "execution_count": 1,
63
+ "metadata": {
64
+ "colab": {
65
+ "base_uri": "https://localhost:8080/"
66
+ },
67
+ "id": "8hiosGzq_qZg",
68
+ "outputId": "6ab73e5e-1f54-417e-a388-07e0d870b132"
69
+ },
70
+ "outputs": [
71
+ {
72
+ "name": "stdout",
73
+ "output_type": "stream",
74
+ "text": [
75
+ "Collecting git+https://github.com/EleutherAI/lm-evaluation-harness.git@big-refactor\n",
76
+ " Cloning https://github.com/EleutherAI/lm-evaluation-harness.git (to revision big-refactor) to /tmp/pip-req-build-tnssql5s\n",
77
+ " Running command git clone --filter=blob:none --quiet https://github.com/EleutherAI/lm-evaluation-harness.git /tmp/pip-req-build-tnssql5s\n",
78
+ " Running command git checkout -b big-refactor --track origin/big-refactor\n",
79
+ " Switched to a new branch 'big-refactor'\n",
80
+ " Branch 'big-refactor' set up to track remote branch 'big-refactor' from 'origin'.\n",
81
+ " Resolved https://github.com/EleutherAI/lm-evaluation-harness.git to commit 42f486ee49b65926a444cb0620870a39a5b4b0a8\n",
82
+ " Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n",
83
+ " Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n",
84
+ " Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
85
+ "Collecting accelerate>=0.21.0 (from lm-eval==1.0.0)\n",
86
+ " Downloading accelerate-0.24.1-py3-none-any.whl (261 kB)\n",
87
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m261.4/261.4 kB\u001b[0m \u001b[31m4.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
88
+ "\u001b[?25hCollecting evaluate (from lm-eval==1.0.0)\n",
89
+ " Downloading evaluate-0.4.1-py3-none-any.whl (84 kB)\n",
90
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m84.1/84.1 kB\u001b[0m \u001b[31m5.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
91
+ "\u001b[?25hCollecting datasets>=2.0.0 (from lm-eval==1.0.0)\n",
92
+ " Downloading datasets-2.15.0-py3-none-any.whl (521 kB)\n",
93
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m521.2/521.2 kB\u001b[0m \u001b[31m9.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
94
+ "\u001b[?25hCollecting jsonlines (from lm-eval==1.0.0)\n",
95
+ " Downloading jsonlines-4.0.0-py3-none-any.whl (8.7 kB)\n",
96
+ "Requirement already satisfied: numexpr in /usr/local/lib/python3.10/dist-packages (from lm-eval==1.0.0) (2.8.7)\n",
97
+ "Collecting peft>=0.2.0 (from lm-eval==1.0.0)\n",
98
+ " Downloading peft-0.6.2-py3-none-any.whl (174 kB)\n",
99
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m174.7/174.7 kB\u001b[0m \u001b[31m7.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
100
+ "\u001b[?25hCollecting pybind11>=2.6.2 (from lm-eval==1.0.0)\n",
101
+ " Downloading pybind11-2.11.1-py3-none-any.whl (227 kB)\n",
102
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m227.7/227.7 kB\u001b[0m \u001b[31m12.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
103
+ "\u001b[?25hCollecting pytablewriter (from lm-eval==1.0.0)\n",
104
+ " Downloading pytablewriter-1.2.0-py3-none-any.whl (111 kB)\n",
105
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m111.1/111.1 kB\u001b[0m \u001b[31m8.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
106
+ "\u001b[?25hCollecting rouge-score>=0.0.4 (from lm-eval==1.0.0)\n",
107
+ " Downloading rouge_score-0.1.2.tar.gz (17 kB)\n",
108
+ " Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
109
+ "Collecting sacrebleu>=1.5.0 (from lm-eval==1.0.0)\n",
110
+ " Downloading sacrebleu-2.3.2-py3-none-any.whl (119 kB)\n",
111
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m119.7/119.7 kB\u001b[0m \u001b[31m8.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
112
+ "\u001b[?25hRequirement already satisfied: scikit-learn>=0.24.1 in /usr/local/lib/python3.10/dist-packages (from lm-eval==1.0.0) (1.2.2)\n",
113
+ "Collecting sqlitedict (from lm-eval==1.0.0)\n",
114
+ " Downloading sqlitedict-2.1.0.tar.gz (21 kB)\n",
115
+ " Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
116
+ "Requirement already satisfied: torch>=1.8 in /usr/local/lib/python3.10/dist-packages (from lm-eval==1.0.0) (2.1.0+cu118)\n",
117
+ "Collecting tqdm-multiprocess (from lm-eval==1.0.0)\n",
118
+ " Downloading tqdm_multiprocess-0.0.11-py3-none-any.whl (9.8 kB)\n",
119
+ "Requirement already satisfied: transformers>=4.1 in /usr/local/lib/python3.10/dist-packages (from lm-eval==1.0.0) (4.35.2)\n",
120
+ "Collecting zstandard (from lm-eval==1.0.0)\n",
121
+ " Downloading zstandard-0.22.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (5.4 MB)\n",
122
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m5.4/5.4 MB\u001b[0m \u001b[31m29.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
123
+ "\u001b[?25hRequirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from accelerate>=0.21.0->lm-eval==1.0.0) (1.23.5)\n",
124
+ "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from accelerate>=0.21.0->lm-eval==1.0.0) (23.2)\n",
125
+ "Requirement already satisfied: psutil in /usr/local/lib/python3.10/dist-packages (from accelerate>=0.21.0->lm-eval==1.0.0) (5.9.5)\n",
126
+ "Requirement already satisfied: pyyaml in /usr/local/lib/python3.10/dist-packages (from accelerate>=0.21.0->lm-eval==1.0.0) (6.0.1)\n",
127
+ "Requirement already satisfied: huggingface-hub in /usr/local/lib/python3.10/dist-packages (from accelerate>=0.21.0->lm-eval==1.0.0) (0.19.4)\n",
128
+ "Requirement already satisfied: pyarrow>=8.0.0 in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (9.0.0)\n",
129
+ "Collecting pyarrow-hotfix (from datasets>=2.0.0->lm-eval==1.0.0)\n",
130
+ " Downloading pyarrow_hotfix-0.6-py3-none-any.whl (7.9 kB)\n",
131
+ "Collecting dill<0.3.8,>=0.3.0 (from datasets>=2.0.0->lm-eval==1.0.0)\n",
132
+ " Downloading dill-0.3.7-py3-none-any.whl (115 kB)\n",
133
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m115.3/115.3 kB\u001b[0m \u001b[31m14.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
134
+ "\u001b[?25hRequirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (1.5.3)\n",
135
+ "Requirement already satisfied: requests>=2.19.0 in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (2.31.0)\n",
136
+ "Requirement already satisfied: tqdm>=4.62.1 in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (4.66.1)\n",
137
+ "Requirement already satisfied: xxhash in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (3.4.1)\n",
138
+ "Collecting multiprocess (from datasets>=2.0.0->lm-eval==1.0.0)\n",
139
+ " Downloading multiprocess-0.70.15-py310-none-any.whl (134 kB)\n",
140
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m134.8/134.8 kB\u001b[0m \u001b[31m19.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
141
+ "\u001b[?25hRequirement already satisfied: fsspec[http]<=2023.10.0,>=2023.1.0 in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (2023.6.0)\n",
142
+ "Requirement already satisfied: aiohttp in /usr/local/lib/python3.10/dist-packages (from datasets>=2.0.0->lm-eval==1.0.0) (3.8.6)\n",
143
+ "Collecting responses<0.19 (from evaluate->lm-eval==1.0.0)\n",
144
+ " Downloading responses-0.18.0-py3-none-any.whl (38 kB)\n",
145
+ "Requirement already satisfied: safetensors in /usr/local/lib/python3.10/dist-packages (from peft>=0.2.0->lm-eval==1.0.0) (0.4.0)\n",
146
+ "Requirement already satisfied: absl-py in /usr/local/lib/python3.10/dist-packages (from rouge-score>=0.0.4->lm-eval==1.0.0) (1.4.0)\n",
147
+ "Requirement already satisfied: nltk in /usr/local/lib/python3.10/dist-packages (from rouge-score>=0.0.4->lm-eval==1.0.0) (3.8.1)\n",
148
+ "Requirement already satisfied: six>=1.14.0 in /usr/local/lib/python3.10/dist-packages (from rouge-score>=0.0.4->lm-eval==1.0.0) (1.16.0)\n",
149
+ "Collecting portalocker (from sacrebleu>=1.5.0->lm-eval==1.0.0)\n",
150
+ " Downloading portalocker-2.8.2-py3-none-any.whl (17 kB)\n",
151
+ "Requirement already satisfied: regex in /usr/local/lib/python3.10/dist-packages (from sacrebleu>=1.5.0->lm-eval==1.0.0) (2023.6.3)\n",
152
+ "Requirement already satisfied: tabulate>=0.8.9 in /usr/local/lib/python3.10/dist-packages (from sacrebleu>=1.5.0->lm-eval==1.0.0) (0.9.0)\n",
153
+ "Collecting colorama (from sacrebleu>=1.5.0->lm-eval==1.0.0)\n",
154
+ " Downloading colorama-0.4.6-py2.py3-none-any.whl (25 kB)\n",
155
+ "Requirement already satisfied: lxml in /usr/local/lib/python3.10/dist-packages (from sacrebleu>=1.5.0->lm-eval==1.0.0) (4.9.3)\n",
156
+ "Requirement already satisfied: scipy>=1.3.2 in /usr/local/lib/python3.10/dist-packages (from scikit-learn>=0.24.1->lm-eval==1.0.0) (1.11.3)\n",
157
+ "Requirement already satisfied: joblib>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from scikit-learn>=0.24.1->lm-eval==1.0.0) (1.3.2)\n",
158
+ "Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from scikit-learn>=0.24.1->lm-eval==1.0.0) (3.2.0)\n",
159
+ "Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from torch>=1.8->lm-eval==1.0.0) (3.13.1)\n",
160
+ "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.10/dist-packages (from torch>=1.8->lm-eval==1.0.0) (4.5.0)\n",
161
+ "Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch>=1.8->lm-eval==1.0.0) (1.12)\n",
162
+ "Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch>=1.8->lm-eval==1.0.0) (3.2.1)\n",
163
+ "Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch>=1.8->lm-eval==1.0.0) (3.1.2)\n",
164
+ "Requirement already satisfied: triton==2.1.0 in /usr/local/lib/python3.10/dist-packages (from torch>=1.8->lm-eval==1.0.0) (2.1.0)\n",
165
+ "Requirement already satisfied: tokenizers<0.19,>=0.14 in /usr/local/lib/python3.10/dist-packages (from transformers>=4.1->lm-eval==1.0.0) (0.15.0)\n",
166
+ "Requirement already satisfied: attrs>=19.2.0 in /usr/local/lib/python3.10/dist-packages (from jsonlines->lm-eval==1.0.0) (23.1.0)\n",
167
+ "Requirement already satisfied: setuptools>=38.3.0 in /usr/local/lib/python3.10/dist-packages (from pytablewriter->lm-eval==1.0.0) (67.7.2)\n",
168
+ "Collecting DataProperty<2,>=1.0.1 (from pytablewriter->lm-eval==1.0.0)\n",
169
+ " Downloading DataProperty-1.0.1-py3-none-any.whl (27 kB)\n",
170
+ "Collecting mbstrdecoder<2,>=1.0.0 (from pytablewriter->lm-eval==1.0.0)\n",
171
+ " Downloading mbstrdecoder-1.1.3-py3-none-any.whl (7.8 kB)\n",
172
+ "Collecting pathvalidate<4,>=2.3.0 (from pytablewriter->lm-eval==1.0.0)\n",
173
+ " Downloading pathvalidate-3.2.0-py3-none-any.whl (23 kB)\n",
174
+ "Collecting tabledata<2,>=1.3.1 (from pytablewriter->lm-eval==1.0.0)\n",
175
+ " Downloading tabledata-1.3.3-py3-none-any.whl (11 kB)\n",
176
+ "Collecting tcolorpy<1,>=0.0.5 (from pytablewriter->lm-eval==1.0.0)\n",
177
+ " Downloading tcolorpy-0.1.4-py3-none-any.whl (7.9 kB)\n",
178
+ "Collecting typepy[datetime]<2,>=1.3.2 (from pytablewriter->lm-eval==1.0.0)\n",
179
+ " Downloading typepy-1.3.2-py3-none-any.whl (31 kB)\n",
180
+ "Requirement already satisfied: charset-normalizer<4.0,>=2.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets>=2.0.0->lm-eval==1.0.0) (3.3.2)\n",
181
+ "Requirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets>=2.0.0->lm-eval==1.0.0) (6.0.4)\n",
182
+ "Requirement already satisfied: async-timeout<5.0,>=4.0.0a3 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets>=2.0.0->lm-eval==1.0.0) (4.0.3)\n",
183
+ "Requirement already satisfied: yarl<2.0,>=1.0 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets>=2.0.0->lm-eval==1.0.0) (1.9.2)\n",
184
+ "Requirement already satisfied: frozenlist>=1.1.1 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets>=2.0.0->lm-eval==1.0.0) (1.4.0)\n",
185
+ "Requirement already satisfied: aiosignal>=1.1.2 in /usr/local/lib/python3.10/dist-packages (from aiohttp->datasets>=2.0.0->lm-eval==1.0.0) (1.3.1)\n",
186
+ "Requirement already satisfied: chardet<6,>=3.0.4 in /usr/local/lib/python3.10/dist-packages (from mbstrdecoder<2,>=1.0.0->pytablewriter->lm-eval==1.0.0) (5.2.0)\n",
187
+ "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->datasets>=2.0.0->lm-eval==1.0.0) (3.4)\n",
188
+ "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->datasets>=2.0.0->lm-eval==1.0.0) (2.0.7)\n",
189
+ "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests>=2.19.0->datasets>=2.0.0->lm-eval==1.0.0) (2023.7.22)\n",
190
+ "Requirement already satisfied: python-dateutil<3.0.0,>=2.8.0 in /usr/local/lib/python3.10/dist-packages (from typepy[datetime]<2,>=1.3.2->pytablewriter->lm-eval==1.0.0) (2.8.2)\n",
191
+ "Requirement already satisfied: pytz>=2018.9 in /usr/local/lib/python3.10/dist-packages (from typepy[datetime]<2,>=1.3.2->pytablewriter->lm-eval==1.0.0) (2023.3.post1)\n",
192
+ "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch>=1.8->lm-eval==1.0.0) (2.1.3)\n",
193
+ "Requirement already satisfied: click in /usr/local/lib/python3.10/dist-packages (from nltk->rouge-score>=0.0.4->lm-eval==1.0.0) (8.1.7)\n",
194
+ "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch>=1.8->lm-eval==1.0.0) (1.3.0)\n",
195
+ "Building wheels for collected packages: lm-eval, rouge-score, sqlitedict\n",
196
+ " Building wheel for lm-eval (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
197
+ " Created wheel for lm-eval: filename=lm_eval-1.0.0-py3-none-any.whl size=994254 sha256=88356155b19f2891981ecef948326ad6ce8ca40a6009378410ec20d0e225995a\n",
198
+ " Stored in directory: /tmp/pip-ephem-wheel-cache-9v6ye7h3/wheels/17/01/26/599c0779e9858a70a73fa8a306699b5b9a868f820c225457b0\n",
199
+ " Building wheel for rouge-score (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
200
+ " Created wheel for rouge-score: filename=rouge_score-0.1.2-py3-none-any.whl size=24933 sha256=6bb0d44e4881972c43ce194e7cb65233d309758cb15f0dec54590d3d2efcfc36\n",
201
+ " Stored in directory: /root/.cache/pip/wheels/5f/dd/89/461065a73be61a532ff8599a28e9beef17985c9e9c31e541b4\n",
202
+ " Building wheel for sqlitedict (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
203
+ " Created wheel for sqlitedict: filename=sqlitedict-2.1.0-py3-none-any.whl size=16863 sha256=5747f7dd73ddf3d8fbcebf51b5e4f718fabe1e94bccdf16d2f22a2e65ee7fdf4\n",
204
+ " Stored in directory: /root/.cache/pip/wheels/79/d6/e7/304e0e6cb2221022c26d8161f7c23cd4f259a9e41e8bbcfabd\n",
205
+ "Successfully built lm-eval rouge-score sqlitedict\n",
206
+ "Installing collected packages: sqlitedict, zstandard, tcolorpy, pybind11, pyarrow-hotfix, portalocker, pathvalidate, mbstrdecoder, jsonlines, dill, colorama, typepy, tqdm-multiprocess, sacrebleu, rouge-score, responses, multiprocess, accelerate, datasets, DataProperty, tabledata, peft, evaluate, pytablewriter, lm-eval\n",
207
+ "Successfully installed DataProperty-1.0.1 accelerate-0.24.1 colorama-0.4.6 datasets-2.15.0 dill-0.3.7 evaluate-0.4.1 jsonlines-4.0.0 lm-eval-1.0.0 mbstrdecoder-1.1.3 multiprocess-0.70.15 pathvalidate-3.2.0 peft-0.6.2 portalocker-2.8.2 pyarrow-hotfix-0.6 pybind11-2.11.1 pytablewriter-1.2.0 responses-0.18.0 rouge-score-0.1.2 sacrebleu-2.3.2 sqlitedict-2.1.0 tabledata-1.3.3 tcolorpy-0.1.4 tqdm-multiprocess-0.0.11 typepy-1.3.2 zstandard-0.22.0\n"
208
+ ]
209
+ }
210
+ ],
211
+ "source": [
212
+ "# Install LM-Eval\n",
213
+ "!pip install git+https://github.com/EleutherAI/lm-evaluation-harness.git@big-refactor"
214
+ ]
215
+ },
216
+ {
217
+ "cell_type": "code",
218
+ "execution_count": 2,
219
+ "metadata": {
220
+ "colab": {
221
+ "base_uri": "https://localhost:8080/",
222
+ "height": 0,
223
+ "referenced_widgets": [
224
+ "a1d3a8aa016544a78e8821c8f6199e06",
225
+ "f61ed33fad754146bdd2ac9db1ba1c48",
226
+ "bfa0af6aeff344c6845e1080a878e92e",
227
+ "fd1ad9e0367d4004aae853b91c3a7617",
228
+ "6b2d90209ec14230b3d58a74ac9b83bf",
229
+ "a73f357065d34d7baf0453ae4a8d75e2",
230
+ "46f521b73fd943c081c648fd873ebc0a",
231
+ "7c5689bc13684db8a22681f41863dddd",
232
+ "48763b6233374554ae76035c0483066f",
233
+ "4986a21eb560448fa79f4b25cde48951",
234
+ "aed3acd2f2d74003b44079c333a0698e"
235
+ ]
236
+ },
237
+ "id": "uyO5MaKkZyah",
238
+ "outputId": "d46e8096-5086-4e49-967e-ea33d4a2a335"
239
+ },
240
+ "outputs": [
241
+ {
242
+ "data": {
243
+ "application/vnd.jupyter.widget-view+json": {
244
+ "model_id": "a1d3a8aa016544a78e8821c8f6199e06",
245
+ "version_major": 2,
246
+ "version_minor": 0
247
+ },
248
+ "text/plain": [
249
+ "Downloading builder script: 0%| | 0.00/5.67k [00:00<?, ?B/s]"
250
+ ]
251
+ },
252
+ "metadata": {},
253
+ "output_type": "display_data"
254
+ }
255
+ ],
256
+ "source": [
257
+ "from lm_eval import api"
258
+ ]
259
+ },
260
+ {
261
+ "cell_type": "markdown",
262
+ "metadata": {
263
+ "id": "8rfUeX6n_wkK"
264
+ },
265
+ "source": [
266
+ "## Create new evaluation tasks with config-based tasks\n",
267
+ "\n",
268
+ "Even within the same task, many works have reported numbers based on different choices of evaluation. Some report on the test sets, validation sets, or even subset of the training sets. Others have specialized prompts and verbalizers. We introduce YAMLs to allow users to easily make different variations. By leveraging the YAML configs to configure evaluations, the refactored LM-Eval takes the methods of the `Task` object and makes them configurable by setting the appropriate attributes in the config file. There, users can set the tasks they want by setting the name of the HF dataset (local tasks are also possible), the dataset splits used, and much more. Key configurations relating to prompting, such as `doc_to_text`, previously implemented as a method of the same name, are now configurable with jinja2 to allow high-level scripting to transform a HF dataset to text string as input to the model.\n",
269
+ "\n"
270
+ ]
271
+ },
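+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a quick illustration of what such a template does (the cell below is only a minimal sketch that uses the `jinja2` package directly on a made-up row, not the harness itself), here is how a `doc_to_text` string renders against a single example:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Minimal sketch: render a doc_to_text Jinja2 template against one example row.\n",
+ "# The row below is made up; the harness does something similar internally for each dataset document.\n",
+ "from jinja2 import Template\n",
+ "\n",
+ "doc = {\"passage\": \"The refactored harness configures tasks through YAML files.\", \"question\": \"does the harness use yaml\"}\n",
+ "doc_to_text = \"{{passage}}\\nQuestion: {{question}}?\\nAnswer:\"\n",
+ "print(Template(doc_to_text).render(**doc))\n"
+ ]
+ },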
272
+ {
273
+ "cell_type": "markdown",
274
+ "metadata": {
275
+ "id": "HYFUhhfOSJKe"
276
+ },
277
+ "source": [
278
+ "A core-feature to LM-Eval is to configure tasks with YAML configs. With configs, you can fill preset fields to easily set up a task.\n",
279
+ "\n",
280
+ "Here, we write a demo YAML config for a multiple-choice evaluation of BoolQ:"
281
+ ]
282
+ },
283
+ {
284
+ "cell_type": "code",
285
+ "execution_count": 3,
286
+ "metadata": {
287
+ "id": "bg3dGROW-V39"
288
+ },
289
+ "outputs": [],
290
+ "source": [
291
+ "YAML_boolq_string = '''\n",
292
+ "task: demo_boolq\n",
293
+ "dataset_path: super_glue\n",
294
+ "dataset_name: boolq\n",
295
+ "output_type: multiple_choice\n",
296
+ "training_split: train\n",
297
+ "validation_split: validation\n",
298
+ "doc_to_text: \"{{passage}}\\nQuestion: {{question}}?\\nAnswer:\"\n",
299
+ "doc_to_target: label\n",
300
+ "doc_to_choice: [\"no\", \"yes\"]\n",
301
+ "should_decontaminate: true\n",
302
+ "doc_to_decontamination_query: passage\n",
303
+ "metric_list:\n",
304
+ " - metric: acc\n",
305
+ "'''\n",
306
+ "with open('boolq.yaml', 'w') as f:\n",
307
+ " f.write(YAML_boolq_string)"
308
+ ]
309
+ },
310
+ {
311
+ "cell_type": "markdown",
312
+ "metadata": {},
313
+ "source": [
314
+ "And we can now run evaluation on this task, by pointing to the config file we've just created:"
315
+ ]
316
+ },
317
+ {
318
+ "cell_type": "code",
319
+ "execution_count": 4,
320
+ "metadata": {
321
+ "id": "LOUHK7PtQfq4"
322
+ },
323
+ "outputs": [
324
+ {
325
+ "name": "stdout",
326
+ "output_type": "stream",
327
+ "text": [
328
+ "2023-11-29:11:54:55,156 INFO [utils.py:160] NumExpr defaulting to 2 threads.\n",
329
+ "2023-11-29 11:54:55.942051: E tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:9342] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
330
+ "2023-11-29 11:54:55.942108: E tensorflow/compiler/xla/stream_executor/cuda/cuda_fft.cc:609] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
331
+ "2023-11-29 11:54:55.942142: E tensorflow/compiler/xla/stream_executor/cuda/cuda_blas.cc:1518] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
332
+ "2023-11-29 11:54:57.066802: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
333
+ "2023-11-29:11:55:00,954 INFO [__main__.py:132] Verbosity set to INFO\n",
334
+ "2023-11-29:11:55:11,038 WARNING [__main__.py:138] --limit SHOULD ONLY BE USED FOR TESTING.REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.\n",
335
+ "2023-11-29:11:55:11,038 INFO [__main__.py:143] Including path: ./\n",
336
+ "2023-11-29:11:55:11,046 INFO [__main__.py:205] Selected Tasks: ['demo_boolq']\n",
337
+ "2023-11-29:11:55:11,047 WARNING [evaluator.py:93] generation_kwargs specified through cli, these settings will be used over set parameters in yaml tasks.\n",
338
+ "2023-11-29:11:55:11,110 INFO [huggingface.py:120] Using device 'cuda'\n",
339
+ "config.json: 100% 571/571 [00:00<00:00, 2.87MB/s]\n",
340
+ "model.safetensors: 100% 5.68G/5.68G [00:32<00:00, 173MB/s]\n",
341
+ "tokenizer_config.json: 100% 396/396 [00:00<00:00, 2.06MB/s]\n",
342
+ "tokenizer.json: 100% 2.11M/2.11M [00:00<00:00, 11.6MB/s]\n",
343
+ "special_tokens_map.json: 100% 99.0/99.0 [00:00<00:00, 555kB/s]\n",
344
+ "2023-11-29:11:56:18,658 WARNING [task.py:614] [Task: demo_boolq] metric acc is defined, but aggregation is not. using default aggregation=mean\n",
345
+ "2023-11-29:11:56:18,658 WARNING [task.py:626] [Task: demo_boolq] metric acc is defined, but higher_is_better is not. using default higher_is_better=True\n",
346
+ "Downloading builder script: 100% 30.7k/30.7k [00:00<00:00, 59.0MB/s]\n",
347
+ "Downloading metadata: 100% 38.7k/38.7k [00:00<00:00, 651kB/s]\n",
348
+ "Downloading readme: 100% 14.8k/14.8k [00:00<00:00, 37.3MB/s]\n",
349
+ "Downloading data: 100% 4.12M/4.12M [00:00<00:00, 55.1MB/s]\n",
350
+ "Generating train split: 100% 9427/9427 [00:00<00:00, 15630.89 examples/s]\n",
351
+ "Generating validation split: 100% 3270/3270 [00:00<00:00, 20002.56 examples/s]\n",
352
+ "Generating test split: 100% 3245/3245 [00:00<00:00, 20866.19 examples/s]\n",
353
+ "2023-11-29:11:56:22,315 INFO [task.py:355] Building contexts for task on rank 0...\n",
354
+ "2023-11-29:11:56:22,322 INFO [evaluator.py:319] Running loglikelihood requests\n",
355
+ "100% 20/20 [00:04<00:00, 4.37it/s]\n",
356
+ "fatal: not a git repository (or any of the parent directories): .git\n",
357
+ "hf (pretrained=EleutherAI/pythia-2.8b), gen_kwargs: (), limit: 10.0, num_fewshot: None, batch_size: 1\n",
358
+ "| Tasks |Version|Filter|n-shot|Metric|Value| |Stderr|\n",
359
+ "|----------|-------|------|-----:|------|----:|---|-----:|\n",
360
+ "|demo_boolq|Yaml |none | 0|acc | 1|± | 0|\n",
361
+ "\n"
362
+ ]
363
+ }
364
+ ],
365
+ "source": [
366
+ "!lm_eval \\\n",
367
+ " --model hf \\\n",
368
+ " --model_args pretrained=EleutherAI/pythia-2.8b \\\n",
369
+ " --include_path ./ \\\n",
370
+ " --tasks demo_boolq \\\n",
371
+ " --limit 10\n"
372
+ ]
373
+ },
374
+ {
375
+ "cell_type": "markdown",
376
+ "metadata": {
377
+ "id": "LOUHK7PtQfq4"
378
+ },
379
+ "source": [
380
+ "Often, tasks are part of a larger group used to measure different capabilities. The dynamism of the field today means new dimensions of evaluation can come about which would mix and match new and older tasks alike. In LM-Eval, We can also group tasks and call that the group name to evaluate on a set of tasks easily. In this instance, let's evaluate the group `yes_or_no_tasks` which comprise of the tasks `demo_boolq` and `demo_cola`; tasks which are multiple choice tasks with options `yes` and `no` as the name suggests.\n",
381
+ "\n",
382
+ "<!-- making new groups is easier than ever, allowing user to work bottom-up by makiing individual tasks and linking them to a group or Top-Down, making a new group by listing existing tasks.\n",
383
+ "\n",
384
+ "We also show the aggregate across samples besides only showing the aggregation between subtasks. This may come in handy when certain groups want to be aggregated as a single task. -->\n",
385
+ "\n",
386
+ "\n"
387
+ ]
388
+ },
389
+ {
390
+ "cell_type": "code",
391
+ "execution_count": 5,
392
+ "metadata": {
393
+ "id": "fthNg3ywO-kA"
394
+ },
395
+ "outputs": [],
396
+ "source": [
397
+ "YAML_cola_string = '''\n",
398
+ "group: yes_or_no_tasks\n",
399
+ "task: demo_cola\n",
400
+ "dataset_path: glue\n",
401
+ "dataset_name: cola\n",
402
+ "output_type: multiple_choice\n",
403
+ "training_split: train\n",
404
+ "validation_split: validation\n",
405
+ "doc_to_text: \"{{sentence}}\\nQuestion: Does this sentence make sense?\\nAnswer:\"\n",
406
+ "doc_to_target: label\n",
407
+ "doc_to_choice: [\"no\", \"yes\"]\n",
408
+ "should_decontaminate: true\n",
409
+ "doc_to_decontamination_query: sentence\n",
410
+ "metric_list:\n",
411
+ " - metric: acc\n",
412
+ "'''\n",
413
+ "with open('cola.yaml', 'w') as f:\n",
414
+ " f.write(YAML_cola_string)"
415
+ ]
416
+ },
417
+ {
418
+ "cell_type": "code",
419
+ "execution_count": 6,
420
+ "metadata": {
421
+ "id": "XceRKCuuDtbn"
422
+ },
423
+ "outputs": [
424
+ {
425
+ "name": "stdout",
426
+ "output_type": "stream",
427
+ "text": [
428
+ "2023-11-29:11:56:33,016 INFO [utils.py:160] NumExpr defaulting to 2 threads.\n",
429
+ "2023-11-29 11:56:33.852995: E tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:9342] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
430
+ "2023-11-29 11:56:33.853050: E tensorflow/compiler/xla/stream_executor/cuda/cuda_fft.cc:609] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
431
+ "2023-11-29 11:56:33.853087: E tensorflow/compiler/xla/stream_executor/cuda/cuda_blas.cc:1518] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
432
+ "2023-11-29 11:56:35.129047: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
433
+ "2023-11-29:11:56:38,546 INFO [__main__.py:132] Verbosity set to INFO\n",
434
+ "2023-11-29:11:56:47,509 WARNING [__main__.py:138] --limit SHOULD ONLY BE USED FOR TESTING.REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.\n",
435
+ "2023-11-29:11:56:47,509 INFO [__main__.py:143] Including path: ./\n",
436
+ "2023-11-29:11:56:47,517 INFO [__main__.py:205] Selected Tasks: ['yes_or_no_tasks']\n",
437
+ "2023-11-29:11:56:47,520 WARNING [evaluator.py:93] generation_kwargs specified through cli, these settings will be used over set parameters in yaml tasks.\n",
438
+ "2023-11-29:11:56:47,550 INFO [huggingface.py:120] Using device 'cuda'\n",
439
+ "2023-11-29:11:57:08,743 WARNING [task.py:614] [Task: demo_cola] metric acc is defined, but aggregation is not. using default aggregation=mean\n",
440
+ "2023-11-29:11:57:08,743 WARNING [task.py:626] [Task: demo_cola] metric acc is defined, but higher_is_better is not. using default higher_is_better=True\n",
441
+ "Downloading builder script: 100% 28.8k/28.8k [00:00<00:00, 52.7MB/s]\n",
442
+ "Downloading metadata: 100% 28.7k/28.7k [00:00<00:00, 51.9MB/s]\n",
443
+ "Downloading readme: 100% 27.9k/27.9k [00:00<00:00, 48.0MB/s]\n",
444
+ "Downloading data: 100% 377k/377k [00:00<00:00, 12.0MB/s]\n",
445
+ "Generating train split: 100% 8551/8551 [00:00<00:00, 19744.58 examples/s]\n",
446
+ "Generating validation split: 100% 1043/1043 [00:00<00:00, 27057.01 examples/s]\n",
447
+ "Generating test split: 100% 1063/1063 [00:00<00:00, 22705.17 examples/s]\n",
448
+ "2023-11-29:11:57:11,698 INFO [task.py:355] Building contexts for task on rank 0...\n",
449
+ "2023-11-29:11:57:11,704 INFO [evaluator.py:319] Running loglikelihood requests\n",
450
+ "100% 20/20 [00:03<00:00, 5.15it/s]\n",
451
+ "fatal: not a git repository (or any of the parent directories): .git\n",
452
+ "hf (pretrained=EleutherAI/pythia-2.8b), gen_kwargs: (), limit: 10.0, num_fewshot: None, batch_size: 1\n",
453
+ "| Tasks |Version|Filter|n-shot|Metric|Value| |Stderr|\n",
454
+ "|---------------|-------|------|-----:|------|----:|---|-----:|\n",
455
+ "|yes_or_no_tasks|N/A |none | 0|acc | 0.7|± |0.1528|\n",
456
+ "| - demo_cola |Yaml |none | 0|acc | 0.7|± |0.1528|\n",
457
+ "\n",
458
+ "| Groups |Version|Filter|n-shot|Metric|Value| |Stderr|\n",
459
+ "|---------------|-------|------|-----:|------|----:|---|-----:|\n",
460
+ "|yes_or_no_tasks|N/A |none | 0|acc | 0.7|± |0.1528|\n",
461
+ "\n"
462
+ ]
463
+ }
464
+ ],
465
+ "source": [
466
+ "# !accelerate launch --no_python\n",
467
+ "!lm_eval \\\n",
468
+ " --model hf \\\n",
469
+ " --model_args pretrained=EleutherAI/pythia-2.8b \\\n",
470
+ " --include_path ./ \\\n",
471
+ " --tasks yes_or_no_tasks \\\n",
472
+ " --limit 10 \\\n",
473
+ " --output output/yes_or_no_tasks/ \\\n",
474
+ " --log_samples\n"
475
+ ]
476
+ },
477
+ {
478
+ "cell_type": "markdown",
479
+ "metadata": {
480
+ "id": "XceRKCuuDtbn"
481
+ },
482
+ "source": [
483
+ "## Edit Prompt Templates Quickly\n",
484
+ "\n",
485
+ "The following is a yaml made to evaluate the specific subtask of `high_school_geography` from MMLU. It uses the standard prompt where the we choose the letters from the options with most likelihood as the model's prediction."
486
+ ]
487
+ },
488
+ {
489
+ "cell_type": "code",
490
+ "execution_count": 7,
491
+ "metadata": {
492
+ "id": "GTFvdt9kSlBG"
493
+ },
494
+ "outputs": [],
495
+ "source": [
496
+ "YAML_mmlu_geo_string = '''\n",
497
+ "group: mmlu\n",
498
+ "task: demo_mmlu_high_school_geography\n",
499
+ "dataset_path: cais/mmlu\n",
500
+ "dataset_name: high_school_geography\n",
501
+ "description: \"The following are multiple choice questions (with answers) about high school geography.\\n\\n\"\n",
502
+ "test_split: test\n",
503
+ "fewshot_split: dev\n",
504
+ "fewshot_config:\n",
505
+ " sampler: first_n\n",
506
+ "output_type: multiple_choice\n",
507
+ "doc_to_text: \"{{question.strip()}}\\nA. {{choices[0]}}\\nB. {{choices[1]}}\\nC. {{choices[2]}}\\nD. {{choices[3]}}\\nAnswer:\"\n",
508
+ "doc_to_choice: [\"A\", \"B\", \"C\", \"D\"]\n",
509
+ "doc_to_target: answer\n",
510
+ "metric_list:\n",
511
+ " - metric: acc\n",
512
+ " aggregation: mean\n",
513
+ " higher_is_better: true\n",
514
+ " - metric: acc_norm\n",
515
+ " aggregation: mean\n",
516
+ " higher_is_better: true\n",
517
+ "'''\n",
518
+ "with open('mmlu_high_school_geography.yaml', 'w') as f:\n",
519
+ " f.write(YAML_mmlu_geo_string)\n"
520
+ ]
521
+ },
522
+ {
523
+ "cell_type": "code",
524
+ "execution_count": 8,
525
+ "metadata": {
526
+ "id": "jyKOfCsKb-xy"
527
+ },
528
+ "outputs": [
529
+ {
530
+ "name": "stdout",
531
+ "output_type": "stream",
532
+ "text": [
533
+ "2023-11-29:11:57:23,598 INFO [utils.py:160] NumExpr defaulting to 2 threads.\n",
534
+ "2023-11-29 11:57:24.719750: E tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:9342] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
535
+ "2023-11-29 11:57:24.719806: E tensorflow/compiler/xla/stream_executor/cuda/cuda_fft.cc:609] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
536
+ "2023-11-29 11:57:24.719847: E tensorflow/compiler/xla/stream_executor/cuda/cuda_blas.cc:1518] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
537
+ "2023-11-29 11:57:26.656125: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
538
+ "2023-11-29:11:57:31,563 INFO [__main__.py:132] Verbosity set to INFO\n",
539
+ "2023-11-29:11:57:40,541 WARNING [__main__.py:138] --limit SHOULD ONLY BE USED FOR TESTING.REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.\n",
540
+ "2023-11-29:11:57:40,541 INFO [__main__.py:143] Including path: ./\n",
541
+ "2023-11-29:11:57:40,558 INFO [__main__.py:205] Selected Tasks: ['demo_mmlu_high_school_geography']\n",
542
+ "2023-11-29:11:57:40,559 WARNING [evaluator.py:93] generation_kwargs specified through cli, these settings will be used over set parameters in yaml tasks.\n",
543
+ "2023-11-29:11:57:40,589 INFO [huggingface.py:120] Using device 'cuda'\n",
544
+ "Downloading builder script: 100% 5.84k/5.84k [00:00<00:00, 17.7MB/s]\n",
545
+ "Downloading metadata: 100% 106k/106k [00:00<00:00, 892kB/s] \n",
546
+ "Downloading readme: 100% 39.7k/39.7k [00:00<00:00, 631kB/s]\n",
547
+ "Downloading data: 100% 166M/166M [00:01<00:00, 89.0MB/s]\n",
548
+ "Generating auxiliary_train split: 100% 99842/99842 [00:07<00:00, 12536.83 examples/s]\n",
549
+ "Generating test split: 100% 198/198 [00:00<00:00, 1439.20 examples/s]\n",
550
+ "Generating validation split: 100% 22/22 [00:00<00:00, 4181.76 examples/s]\n",
551
+ "Generating dev split: 100% 5/5 [00:00<00:00, 36.25 examples/s]\n",
552
+ "2023-11-29:11:58:09,798 INFO [task.py:355] Building contexts for task on rank 0...\n",
553
+ "2023-11-29:11:58:09,822 INFO [evaluator.py:319] Running loglikelihood requests\n",
554
+ "100% 40/40 [00:05<00:00, 7.86it/s]\n",
555
+ "fatal: not a git repository (or any of the parent directories): .git\n",
556
+ "hf (pretrained=EleutherAI/pythia-2.8b), gen_kwargs: (), limit: 10.0, num_fewshot: None, batch_size: 1\n",
557
+ "| Tasks |Version|Filter|n-shot| Metric |Value| |Stderr|\n",
558
+ "|-------------------------------|-------|------|-----:|--------|----:|---|-----:|\n",
559
+ "|demo_mmlu_high_school_geography|Yaml |none | 0|acc | 0.3|± |0.1528|\n",
560
+ "| | |none | 0|acc_norm| 0.3|± |0.1528|\n",
561
+ "\n"
562
+ ]
563
+ }
564
+ ],
565
+ "source": [
566
+ "# !accelerate launch --no_python\n",
567
+ "!lm_eval \\\n",
568
+ " --model hf \\\n",
569
+ " --model_args pretrained=EleutherAI/pythia-2.8b \\\n",
570
+ " --include_path ./ \\\n",
571
+ " --tasks demo_mmlu_high_school_geography \\\n",
572
+ " --limit 10 \\\n",
573
+ " --output output/mmlu_high_school_geography/ \\\n",
574
+ " --log_samples"
575
+ ]
576
+ },
577
+ {
578
+ "cell_type": "markdown",
579
+ "metadata": {
580
+ "id": "jyKOfCsKb-xy"
581
+ },
582
+ "source": [
583
+ "We could also evaluate this task in a different way. For example, instead of observing the loglikelihood of the letters, we can instead evaluate on the choices themselves as the continuation. This is done by simply changing `doc_to_choice` from a list of letters to the corresponding `choices` field from the HF dataset. We write `\"{{choices}}\"` so that the string field is interpreted as jinja string that acquires the list from the HF dataset directly.\n",
584
+ "\n",
585
+ "Another convenient feature here is since we're only modifying the `doc_to_choice` and the rest of config is the same as the task above, we can use the above configuration as a template by using `include: mmlu_high_school_geography.yaml` to load the config from that file. We'll need to add a unique task name as to not colide with the existing yaml config we're including. For this case we'll simply name this one `mmlu_high_school_geography_continuation`. `doc_to_text` is added here just for sake of clarity."
586
+ ]
587
+ },
588
+ {
589
+ "cell_type": "code",
590
+ "execution_count": 9,
591
+ "metadata": {
592
+ "id": "lqElwU54TaK-"
593
+ },
594
+ "outputs": [],
595
+ "source": [
596
+ "YAML_mmlu_geo_string = '''\n",
597
+ "include: mmlu_high_school_geography.yaml\n",
598
+ "task: demo_mmlu_high_school_geography_continuation\n",
599
+ "doc_to_text: \"{{question.strip()}}\\nA. {{choices[0]}}\\nB. {{choices[1]}}\\nC. {{choices[2]}}\\nD. {{choices[3]}}\\nAnswer:\"\n",
600
+ "doc_to_choice: \"{{choices}}\"\n",
601
+ "'''\n",
602
+ "with open('mmlu_high_school_geography_continuation.yaml', 'w') as f:\n",
603
+ " f.write(YAML_mmlu_geo_string)\n"
604
+ ]
605
+ },
606
+ {
607
+ "cell_type": "code",
608
+ "execution_count": 10,
609
+ "metadata": {
610
+ "id": "-_CVnDirdy7j"
611
+ },
612
+ "outputs": [
613
+ {
614
+ "name": "stdout",
615
+ "output_type": "stream",
616
+ "text": [
617
+ "2023-11-29:11:58:21,284 INFO [utils.py:160] NumExpr defaulting to 2 threads.\n",
618
+ "2023-11-29 11:58:22.850159: E tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:9342] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
619
+ "2023-11-29 11:58:22.850219: E tensorflow/compiler/xla/stream_executor/cuda/cuda_fft.cc:609] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
620
+ "2023-11-29 11:58:22.850254: E tensorflow/compiler/xla/stream_executor/cuda/cuda_blas.cc:1518] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
621
+ "2023-11-29 11:58:24.948103: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
622
+ "2023-11-29:11:58:28,460 INFO [__main__.py:132] Verbosity set to INFO\n",
623
+ "2023-11-29:11:58:37,935 WARNING [__main__.py:138] --limit SHOULD ONLY BE USED FOR TESTING.REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.\n",
624
+ "2023-11-29:11:58:37,935 INFO [__main__.py:143] Including path: ./\n",
625
+ "2023-11-29:11:58:37,969 INFO [__main__.py:205] Selected Tasks: ['demo_mmlu_high_school_geography_continuation']\n",
626
+ "2023-11-29:11:58:37,972 WARNING [evaluator.py:93] generation_kwargs specified through cli, these settings will be used over set parameters in yaml tasks.\n",
627
+ "2023-11-29:11:58:38,008 INFO [huggingface.py:120] Using device 'cuda'\n",
628
+ "2023-11-29:11:58:59,758 INFO [task.py:355] Building contexts for task on rank 0...\n",
629
+ "2023-11-29:11:58:59,777 INFO [evaluator.py:319] Running loglikelihood requests\n",
630
+ "100% 40/40 [00:02<00:00, 16.23it/s]\n",
631
+ "fatal: not a git repository (or any of the parent directories): .git\n",
632
+ "hf (pretrained=EleutherAI/pythia-2.8b), gen_kwargs: (), limit: 10.0, num_fewshot: None, batch_size: 1\n",
633
+ "| Tasks |Version|Filter|n-shot| Metric |Value| |Stderr|\n",
634
+ "|--------------------------------------------|-------|------|-----:|--------|----:|---|-----:|\n",
635
+ "|demo_mmlu_high_school_geography_continuation|Yaml |none | 0|acc | 0.1|± |0.1000|\n",
636
+ "| | |none | 0|acc_norm| 0.2|± |0.1333|\n",
637
+ "\n"
638
+ ]
639
+ }
640
+ ],
641
+ "source": [
642
+ "# !accelerate launch --no_python\n",
643
+ "!lm_eval \\\n",
644
+ " --model hf \\\n",
645
+ " --model_args pretrained=EleutherAI/pythia-2.8b \\\n",
646
+ " --include_path ./ \\\n",
647
+ " --tasks demo_mmlu_high_school_geography_continuation \\\n",
648
+ " --limit 10 \\\n",
649
+ " --output output/mmlu_high_school_geography_continuation/ \\\n",
650
+ " --log_samples\n"
651
+ ]
652
+ },
653
+ {
654
+ "cell_type": "markdown",
655
+ "metadata": {
656
+ "id": "-_CVnDirdy7j"
657
+ },
658
+ "source": [
659
+ "If we take a look at the samples, we can see that it is in fact evaluating the continuation based on the choices rather than the letters."
660
+ ]
661
+ },
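+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To make the difference concrete, the next cell is a small standalone sketch (plain Python on a made-up question, not the harness API) of the two request sets: scoring the answer letters versus scoring the full choice strings as continuations."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Standalone sketch: the two ways of scoring one multiple-choice document.\n",
+ "# Toy example; the harness builds analogous (context, continuation) pairs internally.\n",
+ "context = \"Which of these is a renewable energy source?\\nA. Coal\\nB. Natural gas\\nC. Solar\\nD. Oil\\nAnswer:\"\n",
+ "choices = [\"Coal\", \"Natural gas\", \"Solar\", \"Oil\"]\n",
+ "\n",
+ "letter_requests = [(context, \" \" + letter) for letter in [\"A\", \"B\", \"C\", \"D\"]]\n",
+ "continuation_requests = [(context, \" \" + choice) for choice in choices]\n",
+ "\n",
+ "for _, continuation in letter_requests + continuation_requests:\n",
+ "    print(repr(continuation))\n",
+ "# Longer continuations accumulate more log-probability terms, which is roughly why a\n",
+ "# length-normalized metric such as acc_norm is reported alongside plain acc.\n"
+ ]
+ },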
662
+ {
663
+ "cell_type": "code",
664
+ "execution_count": 11,
665
+ "metadata": {
666
+ "id": "duBDqC6PAdjL"
667
+ },
668
+ "outputs": [
669
+ {
670
+ "data": {
671
+ "application/javascript": "\n ((filepath) => {{\n if (!google.colab.kernel.accessAllowed) {{\n return;\n }}\n google.colab.files.view(filepath);\n }})(\"/content/output/mmlu_high_school_geography_continuation/pretrained__EleutherAI__pythia-2.8b_demo_mmlu_high_school_geography_continuation.jsonl\")",
672
+ "text/plain": [
673
+ "<IPython.core.display.Javascript object>"
674
+ ]
675
+ },
676
+ "metadata": {},
677
+ "output_type": "display_data"
678
+ }
679
+ ],
680
+ "source": [
681
+ "from google.colab import files\n",
682
+ "files.view(\"output/mmlu_high_school_geography_continuation/pretrained__EleutherAI__pythia-2.8b_demo_mmlu_high_school_geography_continuation.jsonl\")\n"
683
+ ]
684
+ },
685
+ {
686
+ "cell_type": "markdown",
687
+ "metadata": {
688
+ "id": "6p0-KPwAgK5j"
689
+ },
690
+ "source": [
691
+ "## Closer Look at YAML Fields\n",
692
+ "\n",
693
+ "To prepare a task we can simply fill in a YAML config with the relevant information.\n",
694
+ "\n",
695
+ "`output_type`\n",
696
+ "The current provided evaluation types comprise of the following:\n",
697
+ "1. `loglikelihood`: Evaluates the loglikelihood of a continuation, conditioned on some input string.\n",
698
+ "2. `loglikelihood_rolling`: evaluate the loglikelihood of producing a string, conditioned on the empty string. (Used for perplexity evaluations)\n",
699
+ "3. `multiple_choice`: Evaluates loglikelihood among the a number of choices predicted by the model.\n",
700
+ "4. `greedy_until`: Model outputs greedy generation (can be configured to to use beam search and other generation-related parameters)\n",
701
+ "\n",
702
+ "The core prompt revolves around 3 fields.\n",
703
+ "1. `doc_to_text`: Denotes the prompt template that will be used as input to the model.\n",
704
+ "2. `doc_to_choice`: Available choices that will be used as continuation for the model. This is used when the `output_type` is `multiple_choice`, and otherwise can be left as `None`.\n",
705
+ "3. `doc_to_target`: When `output_type` is `multiple_choice`, this can be an index that corresponds to the correct answer, or the answer string itself (must be a subset of `doc_to_choice`). For other tasks, this is expected to be a string. You can fill this field with a feature name from the HF dataset so long as the resulting feature follows the conditioned described.\n",
706
+ "\n",
707
+ "These three fields can be expressed as strings, column names from the source dataset, or as Jinja2 templates that can use fields from the source dataset as variables.\n"
708
+ ]
709
+ },
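+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a small sketch of the third point above (plain Python, independent of the harness), this is how a `doc_to_target` value that is either an index or an answer string can be resolved against `doc_to_choice`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Sketch: doc_to_target may be an index into doc_to_choice or one of its strings.\n",
+ "doc_to_choice = [\"no\", \"yes\"]\n",
+ "\n",
+ "for target in (1, \"yes\"):  # e.g. an integer label column, or an answer-string column\n",
+ "    gold_index = target if isinstance(target, int) else doc_to_choice.index(target)\n",
+ "    print(f\"target={target!r} -> gold choice {doc_to_choice[gold_index]!r}\")\n"
+ ]
+ },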
710
+ {
711
+ "cell_type": "markdown",
712
+ "metadata": {
713
+ "id": "6p0-KPwAgK5j"
714
+ },
715
+ "source": [
716
+ "## What if Jinja is not Sufficient?\n",
717
+ "\n",
718
+ "There can be times where the Jinja2 templating language is not enough to make the prompt we had in mind. There are a few ways to circumvent this limitation:\n",
719
+ "\n",
720
+ "1. Use `!function` operator for the prompt-related fields to pass a python function that takes as input the dataset row, and will output the prompt template component.\n",
721
+ "2. Perform a transformation on the dataset beforehand."
722
+ ]
723
+ },
724
+ {
725
+ "cell_type": "markdown",
726
+ "metadata": {},
727
+ "source": [
728
+ "Below, we show an example of using `!function` to create `doc_to_text` from a python function:"
729
+ ]
730
+ },
731
+ {
732
+ "cell_type": "code",
733
+ "execution_count": 12,
734
+ "metadata": {
735
+ "colab": {
736
+ "base_uri": "https://localhost:8080/"
737
+ },
738
+ "id": "DYZ5c0JhR1lJ",
739
+ "outputId": "ca945235-fb9e-4f17-8bfa-78e7d6ec1490"
740
+ },
741
+ "outputs": [
742
+ {
743
+ "name": "stdout",
744
+ "output_type": "stream",
745
+ "text": [
746
+ "2023-11-29:11:59:08,312 INFO [utils.py:160] NumExpr defaulting to 2 threads.\n",
747
+ "2023-11-29 11:59:09.348327: E tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:9342] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
748
+ "2023-11-29 11:59:09.348387: E tensorflow/compiler/xla/stream_executor/cuda/cuda_fft.cc:609] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
749
+ "2023-11-29 11:59:09.348421: E tensorflow/compiler/xla/stream_executor/cuda/cuda_blas.cc:1518] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
750
+ "2023-11-29 11:59:10.573752: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
751
+ "2023-11-29:11:59:14,044 INFO [__main__.py:132] Verbosity set to INFO\n",
752
+ "2023-11-29:11:59:23,654 WARNING [__main__.py:138] --limit SHOULD ONLY BE USED FOR TESTING.REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.\n",
753
+ "2023-11-29:11:59:23,654 INFO [__main__.py:143] Including path: ./\n",
754
+ "2023-11-29:11:59:23,678 INFO [__main__.py:205] Selected Tasks: ['demo_mmlu_high_school_geography_function_prompt']\n",
755
+ "2023-11-29:11:59:23,679 WARNING [evaluator.py:93] generation_kwargs specified through cli, these settings will be used over set parameters in yaml tasks.\n",
756
+ "2023-11-29:11:59:23,708 INFO [huggingface.py:120] Using device 'cuda'\n",
757
+ "2023-11-29:11:59:44,516 INFO [task.py:355] Building contexts for task on rank 0...\n",
758
+ "2023-11-29:11:59:44,524 INFO [evaluator.py:319] Running loglikelihood requests\n",
759
+ "100% 40/40 [00:02<00:00, 15.41it/s]\n",
760
+ "fatal: not a git repository (or any of the parent directories): .git\n",
761
+ "hf (pretrained=EleutherAI/pythia-2.8b), gen_kwargs: (), limit: 10.0, num_fewshot: None, batch_size: 1\n",
762
+ "| Tasks |Version|Filter|n-shot| Metric |Value| |Stderr|\n",
763
+ "|-----------------------------------------------|-------|------|-----:|--------|----:|---|-----:|\n",
764
+ "|demo_mmlu_high_school_geography_function_prompt|Yaml |none | 0|acc | 0.1|± |0.1000|\n",
765
+ "| | |none | 0|acc_norm| 0.2|± |0.1333|\n",
766
+ "\n"
767
+ ]
768
+ }
769
+ ],
770
+ "source": [
771
+ "YAML_mmlu_geo_string = '''\n",
772
+ "include: mmlu_high_school_geography.yaml\n",
773
+ "task: demo_mmlu_high_school_geography_function_prompt\n",
774
+ "doc_to_text: !function utils.doc_to_text\n",
775
+ "doc_to_choice: \"{{choices}}\"\n",
776
+ "'''\n",
777
+ "with open('demo_mmlu_high_school_geography_function_prompt.yaml', 'w') as f:\n",
778
+ " f.write(YAML_mmlu_geo_string)\n",
779
+ "\n",
780
+ "DOC_TO_TEXT = '''\n",
781
+ "def doc_to_text(x):\n",
782
+ " question = x[\"question\"].strip()\n",
783
+ " choices = x[\"choices\"]\n",
784
+ " option_a = choices[0]\n",
785
+ " option_b = choices[1]\n",
786
+ " option_c = choices[2]\n",
787
+ " option_d = choices[3]\n",
788
+ " return f\"{question}\\\\nA. {option_a}\\\\nB. {option_b}\\\\nC. {option_c}\\\\nD. {option_d}\\\\nAnswer:\"\n",
789
+ "'''\n",
790
+ "with open('utils.py', 'w') as f:\n",
791
+ " f.write(DOC_TO_TEXT)\n",
792
+ "\n",
793
+ "!lm_eval \\\n",
794
+ " --model hf \\\n",
795
+ " --model_args pretrained=EleutherAI/pythia-2.8b \\\n",
796
+ " --include_path ./ \\\n",
797
+ " --tasks demo_mmlu_high_school_geography_function_prompt \\\n",
798
+ " --limit 10 \\\n",
799
+ " --output output/demo_mmlu_high_school_geography_function_prompt/ \\\n",
800
+ " --log_samples\n"
801
+ ]
802
+ },
803
+ {
804
+ "cell_type": "markdown",
805
+ "metadata": {},
806
+ "source": [
807
+ "Next, we'll also show how to do this via preprocessing the dataset as necessary using the `process_docs` config field:\n",
808
+ "\n",
809
+ "We will write a function that will modify each document in our evaluation dataset's split to add a field that is suitable for us to use in `doc_to_text`."
810
+ ]
811
+ },
812
+ {
813
+ "cell_type": "code",
814
+ "execution_count": null,
815
+ "metadata": {},
816
+ "outputs": [],
817
+ "source": [
818
+ "YAML_mmlu_geo_string = '''\n",
819
+ "include: mmlu_high_school_geography.yaml\n",
820
+ "task: demo_mmlu_high_school_geography_function_prompt_2\n",
821
+ "process_docs: !function utils_process_docs.process_docs\n",
822
+ "doc_to_text: \"{{input}}\"\n",
823
+ "doc_to_choice: \"{{choices}}\"\n",
824
+ "'''\n",
825
+ "with open('demo_mmlu_high_school_geography_process_docs.yaml', 'w') as f:\n",
826
+ " f.write(YAML_mmlu_geo_string)\n",
827
+ "\n",
828
+ "DOC_TO_TEXT = '''\n",
829
+ "def process_docs(dataset):\n",
830
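+    "    # Add an \"input\" field to each doc: the question followed by the lettered answer choices.\n",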
+ " def _process_doc(x):\n",
831
+ " question = x[\"question\"].strip()\n",
832
+ " choices = x[\"choices\"]\n",
833
+ " option_a = choices[0]\n",
834
+ " option_b = choices[1]\n",
835
+ " option_c = choices[2]\n",
836
+ " option_d = choices[3]\n",
837
+ " doc[\"input\"] = f\"{question}\\\\nA. {option_a}\\\\nB. {option_b}\\\\nC. {option_c}\\\\nD. {option_d}\\\\nAnswer:\"\n",
838
+ " return out_doc\n",
839
+ "\n",
840
+ " return dataset.map(_process_doc)\n",
841
+ "'''\n",
842
+ "\n",
843
+ "with open('utils_process_docs.py', 'w') as f:\n",
844
+ " f.write(DOC_TO_TEXT)\n",
845
+ "\n",
846
+ "!lm_eval \\\n",
847
+ " --model hf \\\n",
848
+ " --model_args pretrained=EleutherAI/pythia-2.8b \\\n",
849
+ " --include_path ./ \\\n",
850
+ " --tasks demo_mmlu_high_school_geography_function_prompt_2 \\\n",
851
+ " --limit 10 \\\n",
852
+ " --output output/demo_mmlu_high_school_geography_function_prompt_2/ \\\n",
853
+ " --log_samples\n"
854
+ ]
855
+ },
856
+ {
857
+ "cell_type": "markdown",
858
+ "metadata": {},
859
+ "source": [
860
+ "We hope that this explainer gives you a sense of what can be done with and how to work with LM-Evaluation-Harnes v0.4.0 ! \n",
861
+ "\n",
862
+ "For more information, check out our documentation pages in the `docs/` folder, and if you have questions, please raise them in GitHub issues, or in #lm-thunderdome or #release-discussion on the EleutherAI discord server."
863
+ ]
864
+ }
865
+ ],
866
+ "metadata": {
867
+ "accelerator": "GPU",
868
+ "colab": {
869
+ "collapsed_sections": [
870
+ "zAov81vTbL2K"
871
+ ],
872
+ "gpuType": "T4",
873
+ "provenance": []
874
+ },
875
+ "kernelspec": {
876
+ "display_name": "Python 3",
877
+ "name": "python3"
878
+ },
879
+ "language_info": {
880
+ "name": "python"
881
+ },
882
+ "widgets": {
883
+ "application/vnd.jupyter.widget-state+json": {
884
+ "46f521b73fd943c081c648fd873ebc0a": {
885
+ "model_module": "@jupyter-widgets/controls",
886
+ "model_module_version": "1.5.0",
887
+ "model_name": "DescriptionStyleModel",
888
+ "state": {
889
+ "_model_module": "@jupyter-widgets/controls",
890
+ "_model_module_version": "1.5.0",
891
+ "_model_name": "DescriptionStyleModel",
892
+ "_view_count": null,
893
+ "_view_module": "@jupyter-widgets/base",
894
+ "_view_module_version": "1.2.0",
895
+ "_view_name": "StyleView",
896
+ "description_width": ""
897
+ }
898
+ },
899
+ "48763b6233374554ae76035c0483066f": {
900
+ "model_module": "@jupyter-widgets/controls",
901
+ "model_module_version": "1.5.0",
902
+ "model_name": "ProgressStyleModel",
903
+ "state": {
904
+ "_model_module": "@jupyter-widgets/controls",
905
+ "_model_module_version": "1.5.0",
906
+ "_model_name": "ProgressStyleModel",
907
+ "_view_count": null,
908
+ "_view_module": "@jupyter-widgets/base",
909
+ "_view_module_version": "1.2.0",
910
+ "_view_name": "StyleView",
911
+ "bar_color": null,
912
+ "description_width": ""
913
+ }
914
+ },
915
+ "4986a21eb560448fa79f4b25cde48951": {
916
+ "model_module": "@jupyter-widgets/base",
917
+ "model_module_version": "1.2.0",
918
+ "model_name": "LayoutModel",
919
+ "state": {
920
+ "_model_module": "@jupyter-widgets/base",
921
+ "_model_module_version": "1.2.0",
922
+ "_model_name": "LayoutModel",
923
+ "_view_count": null,
924
+ "_view_module": "@jupyter-widgets/base",
925
+ "_view_module_version": "1.2.0",
926
+ "_view_name": "LayoutView",
927
+ "align_content": null,
928
+ "align_items": null,
929
+ "align_self": null,
930
+ "border": null,
931
+ "bottom": null,
932
+ "display": null,
933
+ "flex": null,
934
+ "flex_flow": null,
935
+ "grid_area": null,
936
+ "grid_auto_columns": null,
937
+ "grid_auto_flow": null,
938
+ "grid_auto_rows": null,
939
+ "grid_column": null,
940
+ "grid_gap": null,
941
+ "grid_row": null,
942
+ "grid_template_areas": null,
943
+ "grid_template_columns": null,
944
+ "grid_template_rows": null,
945
+ "height": null,
946
+ "justify_content": null,
947
+ "justify_items": null,
948
+ "left": null,
949
+ "margin": null,
950
+ "max_height": null,
951
+ "max_width": null,
952
+ "min_height": null,
953
+ "min_width": null,
954
+ "object_fit": null,
955
+ "object_position": null,
956
+ "order": null,
957
+ "overflow": null,
958
+ "overflow_x": null,
959
+ "overflow_y": null,
960
+ "padding": null,
961
+ "right": null,
962
+ "top": null,
963
+ "visibility": null,
964
+ "width": null
965
+ }
966
+ },
967
+ "6b2d90209ec14230b3d58a74ac9b83bf": {
968
+ "model_module": "@jupyter-widgets/base",
969
+ "model_module_version": "1.2.0",
970
+ "model_name": "LayoutModel",
971
+ "state": {
972
+ "_model_module": "@jupyter-widgets/base",
973
+ "_model_module_version": "1.2.0",
974
+ "_model_name": "LayoutModel",
975
+ "_view_count": null,
976
+ "_view_module": "@jupyter-widgets/base",
977
+ "_view_module_version": "1.2.0",
978
+ "_view_name": "LayoutView",
979
+ "align_content": null,
980
+ "align_items": null,
981
+ "align_self": null,
982
+ "border": null,
983
+ "bottom": null,
984
+ "display": null,
985
+ "flex": null,
986
+ "flex_flow": null,
987
+ "grid_area": null,
988
+ "grid_auto_columns": null,
989
+ "grid_auto_flow": null,
990
+ "grid_auto_rows": null,
991
+ "grid_column": null,
992
+ "grid_gap": null,
993
+ "grid_row": null,
994
+ "grid_template_areas": null,
995
+ "grid_template_columns": null,
996
+ "grid_template_rows": null,
997
+ "height": null,
998
+ "justify_content": null,
999
+ "justify_items": null,
1000
+ "left": null,
1001
+ "margin": null,
1002
+ "max_height": null,
1003
+ "max_width": null,
1004
+ "min_height": null,
1005
+ "min_width": null,
1006
+ "object_fit": null,
1007
+ "object_position": null,
1008
+ "order": null,
1009
+ "overflow": null,
1010
+ "overflow_x": null,
1011
+ "overflow_y": null,
1012
+ "padding": null,
1013
+ "right": null,
1014
+ "top": null,
1015
+ "visibility": null,
1016
+ "width": null
1017
+ }
1018
+ },
1019
+ "7c5689bc13684db8a22681f41863dddd": {
1020
+ "model_module": "@jupyter-widgets/base",
1021
+ "model_module_version": "1.2.0",
1022
+ "model_name": "LayoutModel",
1023
+ "state": {
1024
+ "_model_module": "@jupyter-widgets/base",
1025
+ "_model_module_version": "1.2.0",
1026
+ "_model_name": "LayoutModel",
1027
+ "_view_count": null,
1028
+ "_view_module": "@jupyter-widgets/base",
1029
+ "_view_module_version": "1.2.0",
1030
+ "_view_name": "LayoutView",
1031
+ "align_content": null,
1032
+ "align_items": null,
1033
+ "align_self": null,
1034
+ "border": null,
1035
+ "bottom": null,
1036
+ "display": null,
1037
+ "flex": null,
1038
+ "flex_flow": null,
1039
+ "grid_area": null,
1040
+ "grid_auto_columns": null,
1041
+ "grid_auto_flow": null,
1042
+ "grid_auto_rows": null,
1043
+ "grid_column": null,
1044
+ "grid_gap": null,
1045
+ "grid_row": null,
1046
+ "grid_template_areas": null,
1047
+ "grid_template_columns": null,
1048
+ "grid_template_rows": null,
1049
+ "height": null,
1050
+ "justify_content": null,
1051
+ "justify_items": null,
1052
+ "left": null,
1053
+ "margin": null,
1054
+ "max_height": null,
1055
+ "max_width": null,
1056
+ "min_height": null,
1057
+ "min_width": null,
1058
+ "object_fit": null,
1059
+ "object_position": null,
1060
+ "order": null,
1061
+ "overflow": null,
1062
+ "overflow_x": null,
1063
+ "overflow_y": null,
1064
+ "padding": null,
1065
+ "right": null,
1066
+ "top": null,
1067
+ "visibility": null,
1068
+ "width": null
1069
+ }
1070
+ },
1071
+ "a1d3a8aa016544a78e8821c8f6199e06": {
1072
+ "model_module": "@jupyter-widgets/controls",
1073
+ "model_module_version": "1.5.0",
1074
+ "model_name": "HBoxModel",
1075
+ "state": {
1076
+ "_dom_classes": [],
1077
+ "_model_module": "@jupyter-widgets/controls",
1078
+ "_model_module_version": "1.5.0",
1079
+ "_model_name": "HBoxModel",
1080
+ "_view_count": null,
1081
+ "_view_module": "@jupyter-widgets/controls",
1082
+ "_view_module_version": "1.5.0",
1083
+ "_view_name": "HBoxView",
1084
+ "box_style": "",
1085
+ "children": [
1086
+ "IPY_MODEL_f61ed33fad754146bdd2ac9db1ba1c48",
1087
+ "IPY_MODEL_bfa0af6aeff344c6845e1080a878e92e",
1088
+ "IPY_MODEL_fd1ad9e0367d4004aae853b91c3a7617"
1089
+ ],
1090
+ "layout": "IPY_MODEL_6b2d90209ec14230b3d58a74ac9b83bf"
1091
+ }
1092
+ },
1093
+ "a73f357065d34d7baf0453ae4a8d75e2": {
1094
+ "model_module": "@jupyter-widgets/base",
1095
+ "model_module_version": "1.2.0",
1096
+ "model_name": "LayoutModel",
1097
+ "state": {
1098
+ "_model_module": "@jupyter-widgets/base",
1099
+ "_model_module_version": "1.2.0",
1100
+ "_model_name": "LayoutModel",
1101
+ "_view_count": null,
1102
+ "_view_module": "@jupyter-widgets/base",
1103
+ "_view_module_version": "1.2.0",
1104
+ "_view_name": "LayoutView",
1105
+ "align_content": null,
1106
+ "align_items": null,
1107
+ "align_self": null,
1108
+ "border": null,
1109
+ "bottom": null,
1110
+ "display": null,
1111
+ "flex": null,
1112
+ "flex_flow": null,
1113
+ "grid_area": null,
1114
+ "grid_auto_columns": null,
1115
+ "grid_auto_flow": null,
1116
+ "grid_auto_rows": null,
1117
+ "grid_column": null,
1118
+ "grid_gap": null,
1119
+ "grid_row": null,
1120
+ "grid_template_areas": null,
1121
+ "grid_template_columns": null,
1122
+ "grid_template_rows": null,
1123
+ "height": null,
1124
+ "justify_content": null,
1125
+ "justify_items": null,
1126
+ "left": null,
1127
+ "margin": null,
1128
+ "max_height": null,
1129
+ "max_width": null,
1130
+ "min_height": null,
1131
+ "min_width": null,
1132
+ "object_fit": null,
1133
+ "object_position": null,
1134
+ "order": null,
1135
+ "overflow": null,
1136
+ "overflow_x": null,
1137
+ "overflow_y": null,
1138
+ "padding": null,
1139
+ "right": null,
1140
+ "top": null,
1141
+ "visibility": null,
1142
+ "width": null
1143
+ }
1144
+ },
1145
+ "aed3acd2f2d74003b44079c333a0698e": {
1146
+ "model_module": "@jupyter-widgets/controls",
1147
+ "model_module_version": "1.5.0",
1148
+ "model_name": "DescriptionStyleModel",
1149
+ "state": {
1150
+ "_model_module": "@jupyter-widgets/controls",
1151
+ "_model_module_version": "1.5.0",
1152
+ "_model_name": "DescriptionStyleModel",
1153
+ "_view_count": null,
1154
+ "_view_module": "@jupyter-widgets/base",
1155
+ "_view_module_version": "1.2.0",
1156
+ "_view_name": "StyleView",
1157
+ "description_width": ""
1158
+ }
1159
+ },
1160
+ "bfa0af6aeff344c6845e1080a878e92e": {
1161
+ "model_module": "@jupyter-widgets/controls",
1162
+ "model_module_version": "1.5.0",
1163
+ "model_name": "FloatProgressModel",
1164
+ "state": {
1165
+ "_dom_classes": [],
1166
+ "_model_module": "@jupyter-widgets/controls",
1167
+ "_model_module_version": "1.5.0",
1168
+ "_model_name": "FloatProgressModel",
1169
+ "_view_count": null,
1170
+ "_view_module": "@jupyter-widgets/controls",
1171
+ "_view_module_version": "1.5.0",
1172
+ "_view_name": "ProgressView",
1173
+ "bar_style": "success",
1174
+ "description": "",
1175
+ "description_tooltip": null,
1176
+ "layout": "IPY_MODEL_7c5689bc13684db8a22681f41863dddd",
1177
+ "max": 5669,
1178
+ "min": 0,
1179
+ "orientation": "horizontal",
1180
+ "style": "IPY_MODEL_48763b6233374554ae76035c0483066f",
1181
+ "value": 5669
1182
+ }
1183
+ },
1184
+ "f61ed33fad754146bdd2ac9db1ba1c48": {
1185
+ "model_module": "@jupyter-widgets/controls",
1186
+ "model_module_version": "1.5.0",
1187
+ "model_name": "HTMLModel",
1188
+ "state": {
1189
+ "_dom_classes": [],
1190
+ "_model_module": "@jupyter-widgets/controls",
1191
+ "_model_module_version": "1.5.0",
1192
+ "_model_name": "HTMLModel",
1193
+ "_view_count": null,
1194
+ "_view_module": "@jupyter-widgets/controls",
1195
+ "_view_module_version": "1.5.0",
1196
+ "_view_name": "HTMLView",
1197
+ "description": "",
1198
+ "description_tooltip": null,
1199
+ "layout": "IPY_MODEL_a73f357065d34d7baf0453ae4a8d75e2",
1200
+ "placeholder": "​",
1201
+ "style": "IPY_MODEL_46f521b73fd943c081c648fd873ebc0a",
1202
+ "value": "Downloading builder script: 100%"
1203
+ }
1204
+ },
1205
+ "fd1ad9e0367d4004aae853b91c3a7617": {
1206
+ "model_module": "@jupyter-widgets/controls",
1207
+ "model_module_version": "1.5.0",
1208
+ "model_name": "HTMLModel",
1209
+ "state": {
1210
+ "_dom_classes": [],
1211
+ "_model_module": "@jupyter-widgets/controls",
1212
+ "_model_module_version": "1.5.0",
1213
+ "_model_name": "HTMLModel",
1214
+ "_view_count": null,
1215
+ "_view_module": "@jupyter-widgets/controls",
1216
+ "_view_module_version": "1.5.0",
1217
+ "_view_name": "HTMLView",
1218
+ "description": "",
1219
+ "description_tooltip": null,
1220
+ "layout": "IPY_MODEL_4986a21eb560448fa79f4b25cde48951",
1221
+ "placeholder": "​",
1222
+ "style": "IPY_MODEL_aed3acd2f2d74003b44079c333a0698e",
1223
+ "value": " 5.67k/5.67k [00:00&lt;00:00, 205kB/s]"
1224
+ }
1225
+ }
1226
+ }
1227
+ }
1228
+ },
1229
+ "nbformat": 4,
1230
+ "nbformat_minor": 0
1231
+ }
lm-evaluation/examples/visualize-wandb.ipynb ADDED
@@ -0,0 +1,168 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "id": "fc477b96-adee-4829-a9d7-a5eb990df358",
6
+ "metadata": {},
7
+ "source": [
8
+ "# Visualizing Results in Weights and Biases\n",
9
+ "\n",
10
+ "With the Weights and Biases integration, you can now spend more time extracting deeper insights into your evaluation results. The integration is designed to streamline the process of logging and visualizing experiment results using the Weights & Biases (W&B) platform.\n",
11
+ "\n",
12
+ "The integration provide functionalities\n",
13
+ "\n",
14
+ "- to automatically log the evaluation results,\n",
15
+ "- log the samples as W&B Tables for easy visualization,\n",
16
+ "- log the `results.json` file as an artifact for version control,\n",
17
+ "- log the `<task_name>_eval_samples.json` file if the samples are logged,\n",
18
+ "- generate a comprehensive report for analysis and visualization with all the important metric,\n",
19
+ "- log task and cli configs,\n",
20
+ "- and more out of the box like the command used to run the evaluation, GPU/CPU counts, timestamp, etc.\n",
21
+ "\n",
22
+ "The integration is super easy to use with the eval harness. Let's see how!"
23
+ ]
24
+ },
25
+ {
26
+ "cell_type": "code",
27
+ "execution_count": null,
28
+ "id": "3851439a-bff4-41f2-bf21-1b3d8704913b",
29
+ "metadata": {
30
+ "scrolled": true
31
+ },
32
+ "outputs": [],
33
+ "source": [
34
+ "# Install this project if you did not already have it.\n",
35
+ "# This is all that is needed to be installed to start using Weights and Biases\n",
36
+ "\n",
37
+ "!pip -qq install -e ..[wandb]"
38
+ ]
39
+ },
40
+ {
41
+ "cell_type": "markdown",
42
+ "id": "8507fd7e-3b99-4a92-89fa-9eaada74ba91",
43
+ "metadata": {},
44
+ "source": [
45
+ "# Run the Eval Harness\n",
46
+ "\n",
47
+ "Run the eval harness as usual with a `wandb_args` flag. This flag is used to provide arguments for initializing a wandb run ([wandb.init](https://docs.wandb.ai/ref/python/init)) as comma separated string arguments.\n",
48
+ "\n",
49
+ "If `wandb_args` flag is used, the metrics and all other goodness will be automatically logged to Weights and Biases. In the stdout, you will find the link to the W&B run page as well as link to the generated report."
50
+ ]
51
+ },
52
+ {
53
+ "cell_type": "markdown",
54
+ "id": "eec5866e-f01e-42f8-8803-9d77472ef991",
55
+ "metadata": {},
56
+ "source": [
57
+ "## Set your API Key\n",
58
+ "\n",
59
+ "Before you can use W&B, you need to authenticate your machine with an authentication key. Visit https://wandb.ai/authorize to get one."
60
+ ]
61
+ },
62
+ {
63
+ "cell_type": "code",
64
+ "execution_count": null,
65
+ "id": "d824d163-71a9-4313-935d-f1d56397841c",
66
+ "metadata": {},
67
+ "outputs": [],
68
+ "source": [
69
+ "import wandb\n",
70
+ "\n",
71
+ "wandb.login()"
72
+ ]
73
+ },
74
+ {
75
+ "cell_type": "markdown",
76
+ "id": "124e4a34-1547-4bed-bc09-db012bacbda6",
77
+ "metadata": {},
78
+ "source": [
79
+ "> Note that if you are using command line you can simply authenticate your machine by doing `wandb login` in your terminal. For more info check out the [documentation](https://docs.wandb.ai/quickstart#2-log-in-to-wb)."
80
+ ]
81
+ },
82
+ {
83
+ "cell_type": "markdown",
84
+ "id": "abc6f6b6-179a-4aff-ada9-f380fb74df6e",
85
+ "metadata": {},
86
+ "source": [
87
+ "## Run and log to W&B"
88
+ ]
89
+ },
90
+ {
91
+ "cell_type": "code",
92
+ "execution_count": null,
93
+ "id": "bd0a8130-a97b-451a-acd2-3f9885b88643",
94
+ "metadata": {},
95
+ "outputs": [],
96
+ "source": [
97
+ "!lm_eval \\\n",
98
+ " --model hf \\\n",
99
+ " --model_args pretrained=microsoft/phi-2,trust_remote_code=True \\\n",
100
+ " --tasks hellaswag,mmlu_abstract_algebra \\\n",
101
+ " --device cuda:0 \\\n",
102
+ " --batch_size 8 \\\n",
103
+ " --output_path output/phi-2 \\\n",
104
+ " --limit 10 \\\n",
105
+ " --wandb_args project=lm-eval-harness-integration \\\n",
106
+ " --log_samples"
107
+ ]
108
+ },
109
+ {
110
+ "cell_type": "markdown",
111
+ "id": "e974cabdbe70b667",
112
+ "metadata": {},
113
+ "source": ""
114
+ },
115
+ {
116
+ "cell_type": "markdown",
117
+ "id": "5178ca9445b844e4",
118
+ "metadata": {},
119
+ "source": "W&B can also be initialized programmatically for use outside the CLI to parse and log the results."
120
+ },
121
+ {
122
+ "cell_type": "code",
123
+ "execution_count": null,
124
+ "id": "c6a421b2cf3ddac5",
125
+ "metadata": {},
126
+ "outputs": [],
127
+ "source": [
128
+ "import lm_eval\n",
129
+ "from lm_eval.logging_utils import WandbLogger\n",
130
+ "\n",
131
+ "results = lm_eval.simple_evaluate(\n",
132
+ " model=\"hf\",\n",
133
+ " model_args=\"pretrained=microsoft/phi-2,trust_remote_code=True\",\n",
134
+ " tasks=\"hellaswag,mmlu_abstract_algebra\",\n",
135
+ " log_samples=True,\n",
136
+ ")\n",
137
+ "\n",
138
+ "wandb_logger = WandbLogger(\n",
139
+ " project=\"lm-eval-harness-integration\", job_type=\"eval\"\n",
140
+ ") # or empty if wandb.init(...) already called before\n",
141
+ "wandb_logger.post_init(results)\n",
142
+ "wandb_logger.log_eval_result()\n",
143
+ "wandb_logger.log_eval_samples(results[\"samples\"]) # if log_samples"
144
+ ]
145
+ }
146
+ ],
147
+ "metadata": {
148
+ "kernelspec": {
149
+ "display_name": "Python 3 (ipykernel)",
150
+ "language": "python",
151
+ "name": "python3"
152
+ },
153
+ "language_info": {
154
+ "codemirror_mode": {
155
+ "name": "ipython",
156
+ "version": 3
157
+ },
158
+ "file_extension": ".py",
159
+ "mimetype": "text/x-python",
160
+ "name": "python",
161
+ "nbconvert_exporter": "python",
162
+ "pygments_lexer": "ipython3",
163
+ "version": "3.10.12"
164
+ }
165
+ },
166
+ "nbformat": 4,
167
+ "nbformat_minor": 5
168
+ }
lm-evaluation/examples/visualize-zeno.ipynb ADDED
@@ -0,0 +1,115 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {},
6
+ "source": [
7
+ "# Visualizing Results in Zeno\n",
8
+ "\n",
9
+ "Benchmarking your models is the first step towards making sure your model performs well.\n",
10
+ "However, looking at the data behind the benchmark, slicing the data into subsets, and comparing models on individual instances can help you even more in evaluating and quantifying the behavior of your AI system.\n",
11
+ "\n",
12
+ "All of this can be done in [Zeno](https://zenoml.com)!\n",
13
+ "Zeno is super easy to use with the eval harness, let's explore how you can easily upload and visualize your eval results.\n"
14
+ ]
15
+ },
16
+ {
17
+ "cell_type": "code",
18
+ "execution_count": null,
19
+ "metadata": {},
20
+ "outputs": [],
21
+ "source": [
22
+ "# Install this project if you did not already do that. This is all that needs to be installed for you to be able to visualize your data in Zeno!\n",
23
+ "!pip install -e ..\n",
24
+ "!pip install -e ..[zeno]"
25
+ ]
26
+ },
27
+ {
28
+ "cell_type": "markdown",
29
+ "metadata": {},
30
+ "source": [
31
+ "# Run the Eval Harness\n",
32
+ "\n",
33
+ "To visualize the results, run the eval harness with the `log_samples` and `output_path` flags. We expect `output_path` to contain multiple folders that represent individual model names. You can thus run your evaluation on any number of tasks and models and upload all of the results as projects on Zeno.\n"
34
+ ]
35
+ },
36
+ {
37
+ "cell_type": "code",
38
+ "execution_count": null,
39
+ "metadata": {},
40
+ "outputs": [],
41
+ "source": [
42
+ "!lm_eval \\\n",
43
+ " --model hf \\\n",
44
+ " --model_args pretrained=EleutherAI/gpt-neo-2.7B \\\n",
45
+ " --tasks hellaswag,wikitext \\\n",
46
+ " --batch_size 8 \\\n",
47
+ " --device mps \\\n",
48
+ " --log_samples \\\n",
49
+ " --output_path output/gpt-neo-2.7B \\\n",
50
+ " --limit 10"
51
+ ]
52
+ },
53
+ {
54
+ "cell_type": "markdown",
55
+ "metadata": {},
56
+ "source": [
57
+ "# Set your API Key\n",
58
+ "\n",
59
+ "This is so you can be authenticated with Zeno.\n",
60
+ "If you don't already have a Zeno account, first create an account on [Zeno Hub](https://hub.zenoml.com).\n",
61
+ "After logging in to Zeno Hub, generate your API key by clicking on your profile at the bottom left to navigate to your account page.\n"
62
+ ]
63
+ },
64
+ {
65
+ "cell_type": "code",
66
+ "execution_count": null,
67
+ "metadata": {},
68
+ "outputs": [],
69
+ "source": [
70
+ "%env ZENO_API_KEY=YOUR_API_KEY"
71
+ ]
72
+ },
73
+ {
74
+ "cell_type": "markdown",
75
+ "metadata": {},
76
+ "source": [
77
+ "# Visualize Eval Results\n",
78
+ "\n",
79
+ "You can now use the `zeno_visualize` script to upload the results to Zeno.\n",
80
+ "\n",
81
+ "This will use all subfolders in `data_path` as different models and upload all tasks within these model folders to Zeno. If you run the eval harness on multiple tasks, the `project_name` will be used as a prefix and one project will be created per task.\n"
82
+ ]
83
+ },
84
+ {
85
+ "cell_type": "code",
86
+ "execution_count": null,
87
+ "metadata": {},
88
+ "outputs": [],
89
+ "source": [
90
+ "!python ../scripts/zeno_visualize.py --data_path output --project_name \"Zeno Upload Test\""
91
+ ]
92
+ }
93
+ ],
94
+ "metadata": {
95
+ "kernelspec": {
96
+ "display_name": "zeno_projects",
97
+ "language": "python",
98
+ "name": "python3"
99
+ },
100
+ "language_info": {
101
+ "codemirror_mode": {
102
+ "name": "ipython",
103
+ "version": 3
104
+ },
105
+ "file_extension": ".py",
106
+ "mimetype": "text/x-python",
107
+ "name": "python",
108
+ "nbconvert_exporter": "python",
109
+ "pygments_lexer": "ipython3",
110
+ "version": "3.10.11"
111
+ }
112
+ },
113
+ "nbformat": 4,
114
+ "nbformat_minor": 2
115
+ }
lm-evaluation/lm_eval/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (221 Bytes). View file
 
lm-evaluation/lm_eval/__pycache__/__main__.cpython-310.pyc ADDED
Binary file (10.6 kB). View file
 
lm-evaluation/lm_eval/__pycache__/evaluator.cpython-310.pyc ADDED
Binary file (14 kB). View file
 
lm-evaluation/lm_eval/__pycache__/evaluator_utils.cpython-310.pyc ADDED
Binary file (9.77 kB). View file
 
lm-evaluation/lm_eval/__pycache__/logging_utils.cpython-310.pyc ADDED
Binary file (14.7 kB). View file
 
lm-evaluation/lm_eval/__pycache__/utils.cpython-310.pyc ADDED
Binary file (11.2 kB). View file
 
lm-evaluation/lm_eval/caching/__pycache__/cache.cpython-310.pyc ADDED
Binary file (1.6 kB). View file
 
lm-evaluation/lm_eval/models/__init__.py ADDED
@@ -0,0 +1,26 @@
1
+ from . import (
2
+ anthropic_llms,
3
+ dummy,
4
+ gguf,
5
+ huggingface,
6
+ mamba_lm,
7
+ nemo_lm,
8
+ neuron_optimum,
9
+ openai_completions,
10
+ optimum_lm,
11
+ textsynth,
12
+ vllm_causallms,
13
+ )
14
+
15
+
16
+ # TODO: implement __all__
17
+
18
+
19
+ try:
20
+ # enable hf hub transfer if available
21
+ import hf_transfer # type: ignore # noqa
22
+ import huggingface_hub.constants # type: ignore
23
+
24
+ huggingface_hub.constants.HF_HUB_ENABLE_HF_TRANSFER = True
25
+ except ImportError:
26
+ pass
lm-evaluation/lm_eval/models/__pycache__/anthropic_llms.cpython-310.pyc ADDED
Binary file (10.2 kB). View file
 
lm-evaluation/lm_eval/models/__pycache__/dummy.cpython-310.pyc ADDED
Binary file (1.58 kB). View file
 
lm-evaluation/lm_eval/models/__pycache__/gguf.cpython-310.pyc ADDED
Binary file (4.1 kB). View file
 
lm-evaluation/lm_eval/models/__pycache__/huggingface.cpython-310.pyc ADDED
Binary file (25.9 kB). View file
 
lm-evaluation/lm_eval/models/__pycache__/mamba_lm.cpython-310.pyc ADDED
Binary file (3.69 kB). View file
 
lm-evaluation/lm_eval/models/__pycache__/nemo_lm.cpython-310.pyc ADDED
Binary file (13.6 kB). View file
 
lm-evaluation/lm_eval/models/__pycache__/neuron_optimum.cpython-310.pyc ADDED
Binary file (18.3 kB). View file
 
lm-evaluation/lm_eval/models/__pycache__/openai_completions.cpython-310.pyc ADDED
Binary file (14.3 kB). View file
 
lm-evaluation/lm_eval/models/__pycache__/optimum_lm.cpython-310.pyc ADDED
Binary file (2.01 kB). View file
 
lm-evaluation/lm_eval/models/__pycache__/textsynth.cpython-310.pyc ADDED
Binary file (5.23 kB). View file
 
lm-evaluation/lm_eval/models/__pycache__/utils.cpython-310.pyc ADDED
Binary file (20 kB). View file
 
lm-evaluation/lm_eval/models/__pycache__/vllm_causallms.cpython-310.pyc ADDED
Binary file (12.7 kB). View file
 
lm-evaluation/lm_eval/models/anthropic_llms.py ADDED
@@ -0,0 +1,360 @@
1
+ from typing import Any, List, Tuple
2
+
3
+ from tqdm import tqdm
4
+
5
+ from lm_eval import utils
6
+ from lm_eval.api.model import LM
7
+ from lm_eval.api.registry import register_model
8
+ from lm_eval.models.utils import retry_on_specific_exceptions
9
+
10
+
11
+ eval_logger = utils.eval_logger
12
+
13
+
14
+ def anthropic_completion(
15
+ client, #: anthropic.Anthropic,
16
+ model: str,
17
+ prompt: str,
18
+ max_tokens_to_sample: int,
19
+ temperature: float,
20
+ stop: List[str],
21
+ **kwargs: Any,
22
+ ) -> str:
23
+ """Wrapper function around the Anthropic completion API client with exponential back-off
24
+ in case of RateLimitError.
25
+
26
+ params:
27
+ client: anthropic.Anthropic
28
+ Anthropic API client
29
+ model: str
30
+ Anthropic model e.g. 'claude-instant-v1', 'claude-2'
31
+ prompt: str
32
+ Prompt to feed to the model
33
+ max_tokens_to_sample: int
34
+ Maximum number of tokens to sample from the model
35
+ temperature: float
36
+ Sampling temperature
37
+ stop: List[str]
38
+ List of stop sequences
39
+ kwargs: Any
40
+ Additional model_args to pass to the API client
41
+ """
42
+
43
+ try:
44
+ import anthropic
45
+ except ModuleNotFoundError:
46
+ raise Exception(
47
+ "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. \
48
+ please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`",
49
+ )
50
+
51
+ def _exception_callback(e: Exception, sleep_time: float) -> None:
52
+ eval_logger.warning(
53
+ f"RateLimitError occurred: {e.__cause__}\n Retrying in {sleep_time} seconds"
54
+ )
55
+
56
+ @retry_on_specific_exceptions(
57
+ on_exceptions=[anthropic.RateLimitError],
58
+ max_retries=None, # retry forever, consider changing
59
+ on_exception_callback=_exception_callback,
60
+ )
61
+ def completion():
62
+ response = client.completions.create(
63
+ prompt=f"{anthropic.HUMAN_PROMPT} {prompt}{anthropic.AI_PROMPT}",
64
+ model=model,
65
+ # NOTE: Claude really likes to do CoT, and overly aggressive stop sequences
66
+ # (e.g. gsm8k's ":") may truncate a lot of the input.
67
+ stop_sequences=[anthropic.HUMAN_PROMPT] + stop,
68
+ max_tokens_to_sample=max_tokens_to_sample,
69
+ temperature=temperature,
70
+ **kwargs,
71
+ )
72
+ return response.completion
73
+
74
+ return completion()
75
+
76
+
77
+ def anthropic_chat(
78
+ client, #: anthropic.Anthropic,
79
+ model: str,
80
+ prompt: str,
81
+ max_tokens: int,
82
+ temperature: float,
83
+ stop: List[str],
84
+ **kwargs: Any,
85
+ ) -> str:
86
+ """Wrapper function around the Anthropic completion API client with exponential back-off
87
+ in case of RateLimitError.
88
+
89
+ params:
90
+ client: anthropic.Anthropic
91
+ Anthropic API client
92
+ model: str
93
+ Anthropic model e.g. 'claude-3-opus-20240229', 'claude-3-sonnet-20240229'
94
+ prompt: str
95
+ Prompt to feed to the model
96
+ max_tokens: int
97
+ Maximum number of tokens to sample from the model
98
+ temperature: float
99
+ Sampling temperature
100
+ stop: List[str]
101
+ List of stop sequences
102
+ kwargs: Any
103
+ Additional model_args to pass to the API client
104
+ """
105
+
106
+ try:
107
+ import anthropic
108
+ except ModuleNotFoundError:
109
+ raise Exception(
110
+ "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. \
111
+ please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`",
112
+ )
113
+
114
+ def _exception_callback(e: Exception, sleep_time: float) -> None:
115
+ eval_logger.warning(
116
+ f"RateLimitError occurred: {e.__cause__}\n Retrying in {sleep_time} seconds"
117
+ )
118
+
119
+ @retry_on_specific_exceptions(
120
+ on_exceptions=[
121
+ anthropic.RateLimitError,
122
+ anthropic.APIConnectionError,
123
+ anthropic.APIStatusError,
124
+ ],
125
+ max_retries=None, # retry forever, consider changing
126
+ on_exception_callback=_exception_callback,
127
+ )
128
+ def messages():
129
+ response = client.messages.create(
130
+ model=model,
131
+ max_tokens=max_tokens,
132
+ temperature=temperature,
133
+ messages=[{"role": "user", "content": f"{prompt}"}],
134
+ **kwargs,
135
+ )
136
+ return response.content[0].text
137
+
138
+ return messages()
139
+
140
+
141
+ @register_model("anthropic")
142
+ class AnthropicLM(LM):
143
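+ # Completions-API wrapper around Anthropic models. Only generation is supported;
+ # the loglikelihood methods below raise NotImplementedError because logits are unavailable.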
+ REQ_CHUNK_SIZE = 20 # TODO: not used
144
+
145
+ def __init__(
146
+ self,
147
+ batch_size: int = 1,
148
+ model: str = "claude-2.0",
149
+ max_tokens_to_sample: int = 256,
150
+ temperature: float = 0, # defaults to 1
151
+ **kwargs, # top_p, top_k, etc.
152
+ ) -> None:
153
+ """Anthropic API wrapper.
154
+
155
+ :param model: str
156
+ Anthropic model e.g. 'claude-instant-v1', 'claude-2'
157
+ :param max_tokens_to_sample: int
158
+ Maximum number of tokens to sample from the model
159
+ :param temperature: float
160
+ Sampling temperature
161
+ :param kwargs: Any
162
+ Additional model_args to pass to the API client
163
+ """
164
+ super().__init__()
165
+
166
+ try:
167
+ import anthropic
168
+ except ModuleNotFoundError:
169
+ raise Exception(
170
+ "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. \
171
+ please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`",
172
+ )
173
+
174
+ self.model = model
175
+ # defaults to os.environ.get("ANTHROPIC_API_KEY")
176
+ self.client = anthropic.Anthropic()
177
+ self.temperature = temperature
178
+ self.max_tokens_to_sample = max_tokens_to_sample
179
+ self.tokenizer = self.client.get_tokenizer()
180
+ self.kwargs = kwargs
181
+
182
+ @property
183
+ def eot_token_id(self):
184
+ # Not sure but anthropic.HUMAN_PROMPT ?
185
+ raise NotImplementedError("No idea about anthropic tokenization.")
186
+
187
+ @property
188
+ def max_length(self) -> int:
189
+ return 2048
190
+
191
+ @property
192
+ def max_gen_toks(self) -> int:
193
+ return self.max_tokens_to_sample
194
+
195
+ @property
196
+ def batch_size(self):
197
+ # Isn't used because we override _loglikelihood_tokens
198
+ raise NotImplementedError("No support for logits.")
199
+
200
+ @property
201
+ def device(self):
202
+ # Isn't used because we override _loglikelihood_tokens
203
+ raise NotImplementedError("No support for logits.")
204
+
205
+ def tok_encode(self, string: str) -> List[int]:
206
+ return self.tokenizer.encode(string).ids
207
+
208
+ def tok_decode(self, tokens: List[int]) -> str:
209
+ return self.tokenizer.decode(tokens)
210
+
211
+ def _loglikelihood_tokens(self, requests, disable_tqdm: bool = False):
212
+ raise NotImplementedError("No support for logits.")
213
+
214
+ def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]:
215
+ try:
216
+ import anthropic
217
+ except ModuleNotFoundError:
218
+ raise Exception(
219
+ "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. \
220
+ please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`",
221
+ )
222
+
223
+ if not requests:
224
+ return []
225
+
226
+ _requests: List[Tuple[str, dict]] = [req.args for req in requests]
227
+
228
+ res = []
229
+ for request in tqdm(_requests, disable=disable_tqdm):
230
+ try:
231
+ inp = request[0]
232
+ request_args = request[1]
233
+ # generation_kwargs
234
+ until = request_args.get("until")
235
+ max_gen_toks = request_args.get("max_gen_toks", self.max_length)
236
+ temperature = request_args.get("temperature", self.temperature)
237
+ response = anthropic_completion(
238
+ client=self.client,
239
+ model=self.model,
240
+ prompt=inp,
241
+ max_tokens_to_sample=max_gen_toks,
242
+ temperature=temperature, # TODO: implement non-greedy sampling for Anthropic
243
+ stop=until, # type: ignore
244
+ **self.kwargs,
245
+ )
246
+ res.append(response)
247
+
248
+ self.cache_hook.add_partial("generate_until", request, response)
249
+ except anthropic.APIConnectionError as e: # type: ignore # noqa: F821
250
+ eval_logger.critical(f"Server unreachable: {e.__cause__}")
251
+ break
252
+ except anthropic.APIStatusError as e: # type: ignore # noqa: F821
253
+ eval_logger.critical(f"API error {e.status_code}: {e.message}")
254
+ break
255
+
256
+ return res
257
+
258
+ def _model_call(self, inps):
259
+ # Isn't used because we override _loglikelihood_tokens
260
+ raise NotImplementedError()
261
+
262
+ def _model_generate(self, context, max_length, eos_token_id):
263
+ # Isn't used because we override generate_until
264
+ raise NotImplementedError()
265
+
266
+ def loglikelihood(self, requests, disable_tqdm: bool = False):
267
+ raise NotImplementedError("No support for logits.")
268
+
269
+ def loglikelihood_rolling(self, requests, disable_tqdm: bool = False):
270
+ raise NotImplementedError("No support for logits.")
271
+
272
+
273
+ @register_model("anthropic-chat", "anthropic-chat-completions")
274
+ class AnthropicChatLM(AnthropicLM):
275
+ REQ_CHUNK_SIZE = 20 # TODO: not used
276
+
277
+ def __init__(
278
+ self,
279
+ model: str,
280
+ batch_size: int = 1,
281
+ max_tokens: int = 256,
282
+ temperature: float = 0, # defaults to 1
283
+ **kwargs, # top_p, top_k, etc.
284
+ ) -> None:
285
+ """Anthropic API wrapper.
286
+
287
+ :param model: str
288
+ Anthropic model e.g. 'claude-3-opus-20240229', 'claude-3-sonnet-20240229'
289
+ :param max_tokens: int
290
+ Maximum number of tokens to sample from the model
291
+ :param temperature: float
292
+ Sampling temperature
293
+ :param kwargs: Any
294
+ Additional model_args to pass to the API client
295
+ """
296
+ super().__init__()
297
+
298
+ try:
299
+ import anthropic
300
+ except ModuleNotFoundError:
301
+ raise Exception(
302
+ "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. \
303
+ please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`",
304
+ )
305
+
306
+ self.model = model
307
+ # defaults to os.environ.get("ANTHROPIC_API_KEY")
308
+ self.client = anthropic.Anthropic()
309
+ self.temperature = temperature
310
+ self.max_tokens = max_tokens
311
+ self.tokenizer = self.client.get_tokenizer()
312
+ self.kwargs = kwargs
313
+
314
+ @property
315
+ def max_gen_toks(self) -> int:
316
+ return self.max_tokens
317
+
318
+ def generate_until(self, requests) -> List[str]:
319
+ try:
320
+ import anthropic
321
+ except ModuleNotFoundError:
322
+ raise Exception(
323
+ "attempted to use 'anthropic' LM type, but package `anthropic` is not installed. \
324
+ please install anthropic via `pip install 'lm-eval[anthropic]'` or `pip install -e '.[anthropic]'`",
325
+ )
326
+
327
+ if not requests:
328
+ return []
329
+
330
+ _requests: List[Tuple[str, dict]] = [req.args for req in requests]
331
+
332
+ res = []
333
+ for request in tqdm(_requests):
334
+ try:
335
+ inp = request[0]
336
+ request_args = request[1]
337
+ # generation_kwargs
338
+ until = request_args.get("until")
339
+ max_tokens = request_args.get("max_gen_toks", self.max_length)
340
+ temperature = request_args.get("temperature", self.temperature)
341
+ response = anthropic_chat(
342
+ client=self.client,
343
+ model=self.model,
344
+ prompt=inp,
345
+ max_tokens=max_tokens,
346
+ temperature=temperature, # TODO: implement non-greedy sampling for Anthropic
347
+ stop=until, # type: ignore
348
+ **self.kwargs,
349
+ )
350
+ res.append(response)
351
+
352
+ self.cache_hook.add_partial("generate_until", request, response)
353
+ except anthropic.APIConnectionError as e: # type: ignore # noqa: F821
354
+ eval_logger.critical(f"Server unreachable: {e.__cause__}")
355
+ break
356
+ except anthropic.APIStatusError as e: # type: ignore # noqa: F821
357
+ eval_logger.critical(f"API error {e.status_code}: {e.message}")
358
+ break
359
+
360
+ return res
lm-evaluation/lm_eval/models/dummy.py ADDED
@@ -0,0 +1,41 @@
1
+ import random
2
+
3
+ from tqdm import tqdm
4
+
5
+ from lm_eval.api.model import LM
6
+ from lm_eval.api.registry import register_model
7
+
8
+
9
+ @register_model("dummy")
10
+ class DummyLM(LM):
11
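+ # Stub model that returns a random log-likelihood and a fixed string for every request;
+ # useful for testing task plumbing without loading a real model.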
+ def __init__(self) -> None:
12
+ super().__init__()
13
+
14
+ @classmethod
15
+ def create_from_arg_string(cls, arg_string, additional_config=None):
16
+ return cls()
17
+
18
+ def loglikelihood(self, requests, disable_tqdm: bool = False):
19
+ res = []
20
+
21
+ for _ in tqdm(requests, disable=disable_tqdm):
22
+ res.append((-random.random(), False))
23
+
24
+ return res
25
+
26
+ def generate_until(self, requests, disable_tqdm: bool = False):
27
+ res = []
28
+
29
+ for ctx, _ in tqdm(requests, disable=disable_tqdm):
30
+ res.append("lol")
31
+ assert ctx.strip() != ""
32
+
33
+ return res
34
+
35
+ def loglikelihood_rolling(self, requests, disable_tqdm: bool = False):
36
+ res = []
37
+
38
+ for _ in tqdm(requests, disable=disable_tqdm):
39
+ res.append(-random.random())
40
+
41
+ return res
lm-evaluation/lm_eval/models/gguf.py ADDED
@@ -0,0 +1,130 @@
1
+ import logging
2
+ import time
3
+
4
+ import requests
5
+ from requests.exceptions import RequestException
6
+ from tqdm import tqdm
7
+
8
+ from lm_eval.api.model import LM
9
+ from lm_eval.api.registry import register_model
10
+
11
+
12
+ logger = logging.getLogger(__name__)
13
+
14
+
15
+ def get_result(logprobs, context_length):
16
+ is_greedy = True
17
+ offsets = logprobs["text_offset"]
18
+ tokens = logprobs["tokens"]
19
+ tokens_logprobs = logprobs["token_logprobs"]
20
+
21
+ idx = 0
22
+ while offsets[idx] < context_length:
23
+ idx += 1
24
+ continuation_logprobs = sum(tokens_logprobs[idx:-1])
25
+ for i in range(idx, len(tokens)):
26
+ token = tokens[i]
27
+ top_tokens = logprobs["top_logprobs"][i]
28
+ top_token = max(top_tokens.keys(), key=lambda x: top_tokens[x])
29
+ if top_token != token:
30
+ is_greedy = False
31
+ break
32
+
33
+ return continuation_logprobs, is_greedy
34
+
35
+
36
+ @register_model("gguf", "ggml")
37
+ class GGUFLM(LM):
38
+ def __init__(self, base_url=None, max_length=2048, **kwargs):
39
+ super().__init__()
40
+ self.base_url = base_url
41
+ assert self.base_url, "must pass `base_url` to use GGUF LM!"
42
+ self.logprobs = 10
43
+ self.temperature = 0.0
44
+ self.max_length = max_length
45
+
46
+ def gguf_completion(
47
+ self, context, continuation=None, stop=None, retries=3, delay=5, **kwargs
48
+ ):
49
+ for _ in range(retries):
50
+ try:
51
+ prompt = context
52
+ request = {
53
+ "prompt": prompt,
54
+ "logprobs": self.logprobs,
55
+ "temperature": self.temperature,
56
+ }
57
+ if continuation:
58
+ prompt += continuation
59
+ request.update({"prompt": prompt, "max_tokens": 1, "echo": True})
60
+ if stop is not None:
61
+ request["stop"] = stop
62
+ response = requests.post(
63
+ f"{self.base_url}/v1/completions", json=request
64
+ )
65
+ response.raise_for_status()
66
+ return response.json()
67
+ except RequestException as e:
68
+ logger.error(f"RequestException: {e}")
69
+ time.sleep(delay) # wait before retrying
70
+ else:
71
+ raise Exception(f"Failed to get a valid response after {retries} retries.")
72
+
73
+ def loglikelihood(self, requests, disable_tqdm: bool = False):
74
+ if not requests:
75
+ return []
76
+ res = []
77
+ for context, continuation in tqdm(
78
+ [req.args for req in requests], disable=disable_tqdm
79
+ ):
80
+ response = self.gguf_completion(context=context, continuation=continuation)
81
+ if response and "choices" in response and response["choices"]:
82
+ choice = response["choices"][0]
83
+ logprobs = choice.get("logprobs")
84
+ if (
85
+ logprobs
86
+ and "token_logprobs" in logprobs
87
+ and logprobs["token_logprobs"]
88
+ ):
89
+ logprob, is_greedy = get_result(logprobs, len(context))
90
+ res.append((logprob, is_greedy))
91
+ else:
92
+ logger.warning(
93
+ "Invalid logprobs data. Expected 'logprobs' to contain 'token_logprobs' list."
94
+ )
95
+ else:
96
+ logger.error(
97
+ f"Invalid response for loglikelihood. Response: {response}"
98
+ )
99
+ assert False
100
+ return res
101
+
102
+ def generate_until(self, requests, disable_tqdm: bool = False):
103
+ if not requests:
104
+ return []
105
+
106
+ res = []
107
+ for request in tqdm([req.args for req in requests], disable=disable_tqdm):
108
+ inp = request[0]
109
+ request_args = request[1]
110
+ until = request_args.get("until", ["</s>"])
111
+ response = self.gguf_completion(context=inp, stop=until)
112
+ if response and "choices" in response and response["choices"]:
113
+ choice = response["choices"][0]
114
+ if "text" in choice:
115
+ generated_text = choice["text"].strip()
116
+ res.append(generated_text)
117
+ else:
118
+ logger.error(
119
+ f"Invalid response for greedy_until. Response: {response}"
120
+ )
121
+ res.append(None) # Add default value in case of error
122
+ else:
123
+ logger.error(f"Invalid response for greedy_until. Response: {response}")
124
+ res.append(None) # Add default value in case of error
125
+ return res
126
+
127
+ def loglikelihood_rolling(self, requests, disable_tqdm: bool = False):
128
+ raise NotImplementedError(
129
+ "loglikelihood_rolling not yet supported for GGUF models"
130
+ )
lm-evaluation/lm_eval/models/huggingface.py ADDED
@@ -0,0 +1,1243 @@
1
+ import copy
2
+ import os
3
+ from datetime import timedelta
4
+ from pathlib import Path
5
+ from typing import List, Literal, Optional, Tuple, Union
6
+
7
+ import torch
8
+ import torch.nn.functional as F
9
+ import transformers
10
+ from accelerate import (
11
+ Accelerator,
12
+ DistributedType,
13
+ InitProcessGroupKwargs,
14
+ find_executable_batch_size,
15
+ )
16
+ from packaging import version
17
+ from peft import PeftModel
18
+ from peft import __version__ as PEFT_VERSION
19
+ from tqdm import tqdm
20
+ from transformers.models.auto.modeling_auto import (
21
+ MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
22
+ MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
23
+ )
24
+
25
+ from lm_eval import utils
26
+ from lm_eval.api.instance import Instance
27
+ from lm_eval.api.model import TemplateLM
28
+ from lm_eval.api.registry import register_model
29
+ from lm_eval.models.utils import (
30
+ Collator,
31
+ clear_torch_cache,
32
+ get_dtype,
33
+ pad_and_concat,
34
+ stop_sequences_criteria,
35
+ )
36
+
37
+
38
+ eval_logger = utils.eval_logger
39
+
40
+
41
+ def _get_accelerate_args(
42
+ device_map_option: Optional[str] = "auto",
43
+ max_memory_per_gpu: Optional[Union[int, str]] = None,
44
+ max_cpu_memory: Optional[Union[int, str]] = None,
45
+ offload_folder: Optional[str] = "./offload",
46
+ ) -> dict:
47
+ """Returns the kwargs needed to apply `accelerate` in `AutoModel.from_pretrained`."""
48
+ max_memory = {}
49
+ if max_memory_per_gpu is not None:
50
+ max_memory_per_gpu_map = {
51
+ device_idx: max_memory_per_gpu
52
+ for device_idx in range(torch.cuda.device_count())
53
+ }
54
+ max_memory.update(max_memory_per_gpu_map)
55
+ if max_cpu_memory is not None:
56
+ max_memory["cpu"] = max_cpu_memory
57
+
58
+ args = {}
59
+ if max_memory:
60
+ args["max_memory"] = max_memory
61
+ args["device_map"] = device_map_option
62
+ args["offload_folder"] = offload_folder
63
+ return args
64
+
65
+
66
+ @register_model("hf-auto", "hf", "huggingface")
67
+ class HFLM(TemplateLM):
68
+ """
69
+ An abstracted Huggingface model class. Enables usage with both models of
70
+ `transformers.AutoModelForCausalLM` and `transformers.AutoModelForSeq2SeqLM` classes.
71
+
72
+ Supports data-parallel multi-GPU with HF Accelerate.
73
+ """
74
+
75
+ AUTO_MODEL_CLASS = None
76
+ _DEFAULT_MAX_LENGTH = 2048
77
+
78
+ def __init__(
79
+ self,
80
+ pretrained: Optional[Union[str, transformers.PreTrainedModel]] = "gpt2",
81
+ backend: Optional[Literal["default", "causal", "seq2seq"]] = "default",
82
+ # override whether the model should be treated as decoder-only (causal) or encoder-decoder (seq2seq)
83
+ revision: Optional[str] = "main",
84
+ subfolder: Optional[str] = None,
85
+ tokenizer: Optional[
86
+ Union[
87
+ str,
88
+ transformers.PreTrainedTokenizer,
89
+ transformers.PreTrainedTokenizerFast,
90
+ ]
91
+ ] = None,
92
+ truncation: Optional[bool] = False,
93
+ logits_cache: bool = True,
94
+ max_length: Optional[int] = None,
95
+ device: Optional[str] = "cuda",
96
+ dtype: Optional[Union[str, torch.dtype]] = "auto",
97
+ batch_size: Optional[Union[int, str]] = 1,
98
+ max_batch_size: Optional[int] = 64,
99
+ trust_remote_code: Optional[bool] = False,
100
+ use_fast_tokenizer: Optional[bool] = True,
101
+ add_bos_token: Optional[bool] = False,
102
+ prefix_token_id: Optional[int] = None,
103
+ # arguments used for splitting a model across GPUs naively.
104
+ # only used if `parallelize=True`.
105
+ parallelize: Optional[bool] = False,
106
+ device_map_option: Optional[str] = "auto",
107
+ max_memory_per_gpu: Optional[Union[int, str]] = None,
108
+ max_cpu_memory: Optional[Union[int, str]] = None,
109
+ offload_folder: Optional[Union[str, os.PathLike]] = "./offload",
110
+ # PEFT and quantization options
111
+ peft: Optional[str] = None,
112
+ autogptq: Optional[Union[bool, str]] = False,
113
+ **kwargs,
114
+ ) -> None:
115
+ super().__init__()
116
+
117
+ # optionally: take in an already-initialized transformers.PreTrainedModel
118
+ if not isinstance(pretrained, str):
119
+ eval_logger.warning(
120
+ "`pretrained` model kwarg is not of type `str`. Many other model arguments may be ignored. Please do not launch via accelerate or use `parallelize=True` if passing an existing model this way."
121
+ )
122
+ assert not parallelize, "`parallelize=True` is not compatible with passing pre-initialized model to `pretrained`"
123
+ self._model = pretrained
124
+ self._device = self._model.device
125
+ self._config = self._model.config
126
+ gpus = 0
127
+
128
+ if tokenizer:
129
+ assert isinstance(
130
+ tokenizer, transformers.PreTrainedTokenizer
131
+ ) or isinstance(tokenizer, transformers.PreTrainedTokenizerFast)
132
+ self.tokenizer = tokenizer
133
+ else:
134
+ # Get tokenizer
135
+ model_name = self._model.name_or_path
136
+ self.tokenizer = transformers.AutoTokenizer.from_pretrained(
137
+ model_name,
138
+ revision=revision,
139
+ trust_remote_code=trust_remote_code,
140
+ use_fast=use_fast_tokenizer,
141
+ )
142
+
143
+ else:
144
+ assert isinstance(device, str)
145
+ assert isinstance(pretrained, str)
146
+ assert isinstance(batch_size, (int, str))
147
+
148
+ gpus = torch.cuda.device_count()
149
+ accelerator_kwargs = InitProcessGroupKwargs(timeout=timedelta(weeks=52))
150
+ accelerator = Accelerator(kwargs_handlers=[accelerator_kwargs])
151
+ if accelerator.num_processes > 1:
152
+ self.accelerator = accelerator
153
+
154
+ if not (parallelize or accelerator.num_processes > 1):
155
+ # use user-passed device
156
+ device_list = set(
157
+ ["cuda", "cpu"]
158
+ + [f"cuda:{i}" for i in range(torch.cuda.device_count())]
159
+ + ["mps", "mps:0"]
160
+ )
161
+ if device and device in device_list:
162
+ self._device = torch.device(device)
163
+ eval_logger.info(f"Using device '{device}'")
164
+ if device in ("mps", "mps:0") and version.parse(
165
+ torch.__version__
166
+ ) < version.parse("2.1"):
167
+ raise RuntimeError(
168
+ f"mps requires torch >= 2.1. You have {torch.__version__}"
169
+ )
170
+ else:
171
+ eval_logger.info("Device not specified")
172
+ eval_logger.info(f"Cuda Available? {torch.cuda.is_available()}")
173
+ self._device = (
174
+ torch.device("cuda")
175
+ if torch.cuda.is_available()
176
+ else torch.device("cpu")
177
+ )
178
+ else:
179
+ if device != "cuda":
180
+ eval_logger.info(
181
+ f"Using `accelerate launch` or `parallelize=True`, device '{device}' will be overridden when placing model."
182
+ )
183
+ # TODO: include in warning that `load_in_8bit` etc. affect this too
184
+ self._device = torch.device(device)
185
+
186
+ # TODO: update this to be less of a hack once subfolder is fixed in HF
187
+ revision = revision + ("/" + subfolder if subfolder is not None else "")
188
+
189
+ self._get_config(
190
+ pretrained,
191
+ revision=revision,
192
+ trust_remote_code=trust_remote_code,
193
+ )
194
+
195
+ # determine which of 'causal' and 'seq2seq' backends to use
196
+ self._get_backend(
197
+ config=self.config, backend=backend, trust_remote_code=trust_remote_code
198
+ )
199
+
200
+ # if we passed `pretrained` as a string, initialize our model now
201
+ if isinstance(pretrained, str):
202
+ self._create_model(
203
+ pretrained=pretrained,
204
+ revision=revision,
205
+ dtype=dtype,
206
+ trust_remote_code=trust_remote_code,
207
+ parallelize=parallelize,
208
+ device_map_option=device_map_option,
209
+ max_memory_per_gpu=max_memory_per_gpu,
210
+ max_cpu_memory=max_cpu_memory,
211
+ offload_folder=offload_folder,
212
+ peft=peft,
213
+ autogptq=autogptq,
214
+ **kwargs,
215
+ )
216
+
217
+ # access self._model through self.model property outside this method
218
+ if isinstance(self.model, torch.nn.Module):
219
+ self.model.eval()
220
+ self.model.tie_weights()
221
+
222
+ if isinstance(pretrained, str) and (gpus >= 1 or str(self.device) == "mps"):
223
+ # TODO: can remove this whole snippet except in the mps case, perhaps?
224
+ if not (parallelize or autogptq or hasattr(self, "accelerator")):
225
+ # place model onto device requested manually,
226
+ # if not using HF Accelerate or device_map
227
+ # or any other option that preloads model onto device
228
+ try:
229
+ self.model.to(self.device)
230
+ except ValueError:
231
+ eval_logger.debug(
232
+ "Failed to place model onto specified device. This may be because the model is quantized via `bitsandbytes` or `device_map` is provided. If the desired GPU is being used, this message is safe to ignore."
233
+ )
234
+
235
+ self._create_tokenizer(
236
+ pretrained,
237
+ tokenizer,
238
+ revision=revision,
239
+ trust_remote_code=trust_remote_code,
240
+ use_fast_tokenizer=use_fast_tokenizer,
241
+ )
242
+
243
+ self.truncation = truncation
244
+ self.logits_cache = logits_cache
245
+ self.vocab_size = self.tokenizer.vocab_size
246
+ # select (or create) a pad token to use
247
+ if self.tokenizer.pad_token:
248
+ pass
249
+ elif self.tokenizer.unk_token:
250
+ self.tokenizer.pad_token_id = self.tokenizer.unk_token_id
251
+ elif self.tokenizer.eos_token:
252
+ self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
253
+ else:
254
+ if getattr(self.config, "model_type", None) == "qwen":
255
+ # Qwen's trust_remote_code tokenizer does not allow for adding special tokens
256
+ self.tokenizer.pad_token = "<|endoftext|>"
257
+ elif (
258
+ self.tokenizer.__class__.__name__ == "RWKVWorldTokenizer"
259
+ or self.tokenizer.__class__.__name__ == "Rwkv5Tokenizer"
260
+ ):
261
+ # The RWKV world tokenizer, does not allow for adding special tokens / setting the pad token (which is set as 0)
262
+ # The additional tokenizer name check is needed, as there exists rwkv4 models with neox tokenizer
263
+ # ---
264
+ # Note that the world tokenizer class name, might change in the future for the final huggingface merge
265
+ # https://github.com/huggingface/transformers/pull/26963
266
+ assert self.tokenizer.pad_token_id == 0
267
+ else:
268
+ self.tokenizer.add_special_tokens({"pad_token": "<|pad|>"})
269
+
270
+ # TODO: override this for Gemma
271
+ self.add_bos_token = add_bos_token
272
+ if getattr(self.config, "model_type", None) == "gemma":
273
+ self.add_bos_token = True
274
+ eval_logger.info(
275
+ f"Model type is '{self.config.model_type}', a BOS token will be used as Gemma underperforms without it."
276
+ )
277
+
278
+ self._max_length = max_length
279
+
280
+ self.batch_schedule = 1
281
+ self.batch_sizes = {}
282
+ self.max_batch_size = max_batch_size
283
+
284
+ if str(batch_size).startswith("auto"):
285
+ batch_size = batch_size.split(":")
286
+ self.batch_size_per_gpu = batch_size[0]
287
+ self.batch_schedule = float(batch_size[1]) if len(batch_size) > 1 else 1
288
+ else:
289
+ self.batch_size_per_gpu = int(batch_size)
290
+
291
+ if isinstance(pretrained, str):
292
+ # multigpu data-parallel support when launched with accelerate
293
+ if gpus > 1:
294
+ if parallelize:
295
+ if accelerator.num_processes > 1:
296
+ raise RuntimeError(
297
+ "Attempted to use both a HF Accelerate `device_map` and to launch via `accelerate launch`. If this is the case, please either remove `parallelize=True` from --model_args or launch outside of the Accelerate launcher."
298
+ )
299
+ else:
300
+ pass
301
+ elif accelerator.num_processes == 1:
302
+ # if we aren't launching via accelerate, ditch
303
+ self._rank = 0
304
+ self._world_size = 1
305
+ else:
306
+ if gpus > accelerator.num_processes:
307
+ eval_logger.warning(
308
+ "WARNING: The number of total system GPUs does not match the number of spawned processes. "
309
+ "If you would like to use data parallelism, please launch the script "
310
+ "with 'accelerate launch *script*'. "
311
+ f"Current run will proceed with {accelerator.num_processes} devices."
312
+ )
313
+ assert (
314
+ accelerator.distributed_type
315
+ in [
316
+ DistributedType.FSDP,
317
+ DistributedType.MULTI_GPU,
318
+ ]
319
+ ), "Unsupported distributed type provided. Only DDP and FSDP are supported."
320
+ if accelerator.distributed_type == DistributedType.FSDP:
321
+ self._model = accelerator.prepare(self.model)
322
+ else:
323
+ self._model = accelerator.prepare_model(
324
+ self.model, evaluation_mode=True
325
+ )
326
+ self._device = torch.device(
327
+ f"cuda:{accelerator.local_process_index}"
328
+ )
329
+ self.accelerator = accelerator
330
+
331
+ if self.accelerator.is_local_main_process:
332
+ eval_logger.info(f"Using {gpus} devices with data parallelism")
333
+
334
+ self._rank = self.accelerator.local_process_index
335
+ self._world_size = self.accelerator.num_processes
336
+ else:
337
+ # if a PreTrainedModel was passed into HFLM, we forgo distributed setup.
338
+ eval_logger.warning(
339
+ "Passed an already-initialized model through `pretrained`, assuming single-process call to evaluate() or custom distributed integration"
340
+ )
341
+ self._rank = 0
342
+ self._world_size = 1
343
+
344
+ self.custom_prefix_token_id = prefix_token_id
345
+ if prefix_token_id is not None:
346
+ eval_logger.info(
347
+ f"Loglikelihood prefix token id used in evaluation: {self.prefix_token_id}"
348
+ )
349
+
350
+ @property
351
+ def config(self):
352
+ # return the associated transformers.AutoConfig for the given pretrained model.
353
+ return self._config
354
+
355
+ @property
356
+ def model(self):
357
+ # returns the model, unwrapping it if using Accelerate
358
+ if hasattr(self, "accelerator"):
359
+ return self.accelerator.unwrap_model(self._model)
360
+ else:
361
+ return self._model
362
+
363
+ @property
364
+ def eot_token_id(self):
365
+ # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
366
+ return self.tokenizer.eos_token_id
367
+
368
+ @property
369
+ def prefix_token_id(self):
370
+ # it is used as prefix for loglikelihood
371
+ if self.custom_prefix_token_id is not None:
372
+ return self.custom_prefix_token_id
373
+ if self.tokenizer.bos_token_id is not None:
374
+ return self.tokenizer.bos_token_id
375
+ return self.tokenizer.eos_token_id
376
+
377
+ @property
378
+ def max_length(self):
379
+ if self._max_length: # if max length manually set, return it
380
+ return self._max_length
381
+ seqlen_config_attrs = ("n_positions", "max_position_embeddings", "n_ctx")
382
+ for attr in seqlen_config_attrs:
383
+ if hasattr(self.model.config, attr):
384
+ return getattr(self.model.config, attr)
385
+ if hasattr(self.tokenizer, "model_max_length"):
386
+ if self.tokenizer.model_max_length == 1000000000000000019884624838656:
387
+ return self._DEFAULT_MAX_LENGTH
388
+ return self.tokenizer.model_max_length
389
+ return self._DEFAULT_MAX_LENGTH
390
+
391
+ @property
392
+ def max_gen_toks(self) -> int:
393
+ return 256
394
+
395
+ @property
396
+ def batch_size(self):
397
+ return self.batch_size_per_gpu
398
+
399
+ @property
400
+ def device(self):
401
+ return self._device
402
+
403
+ @property
404
+ def rank(self):
405
+ return self._rank
406
+
407
+ @property
408
+ def world_size(self):
409
+ return self._world_size
410
+
411
+ def _get_backend(
412
+ self,
413
+ config: Union[transformers.PretrainedConfig, transformers.AutoConfig],
414
+ backend: Optional[Literal["default", "causal", "seq2seq"]] = "default",
415
+ trust_remote_code: Optional[bool] = False,
416
+ ) -> None:
417
+ """
418
+ Helper method during initialization.
419
+ Determines the backend ("causal" (decoder-only) or "seq2seq" (encoder-decoder))
420
+ model type to be used.
421
+ """
422
+ assert backend in ["default", "causal", "seq2seq"]
423
+
424
+ if backend != "default":
425
+ # if we've settled on non-default backend, use that manually
426
+ if backend == "causal":
427
+ self.AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM
428
+ elif backend == "seq2seq":
429
+ self.AUTO_MODEL_CLASS = transformers.AutoModelForSeq2SeqLM
430
+ eval_logger.info(
431
+ f"Overrode HF model backend type, and using type '{backend}'"
432
+ )
433
+ else:
434
+ # determine and use the default HF backend for this model, based on its config + metadata.
435
+ if (
436
+ getattr(config, "model_type")
437
+ in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
438
+ ):
439
+ # first check if model type is listed under seq2seq models, since some
440
+ # models like MBart are listed in both seq2seq and causal mistakenly in HF transformers.
441
+ # these special cases should be treated as seq2seq models.
442
+ self.AUTO_MODEL_CLASS = transformers.AutoModelForSeq2SeqLM
443
+ elif (
444
+ getattr(self.config, "model_type") in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
445
+ ):
446
+ self.AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM
447
+ else:
448
+ if not trust_remote_code:
449
+ eval_logger.warning(
450
+ "HF model type is neither marked as CausalLM or Seq2SeqLM. \
451
+ This is expected if your model requires `trust_remote_code=True` but may be an error otherwise."
452
+ )
453
+ # if model type is neither in HF transformers causal or seq2seq model registries
454
+ # then we default to AutoModelForCausalLM
455
+ self.AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM
456
+
457
+ assert self.AUTO_MODEL_CLASS in [
458
+ transformers.AutoModelForCausalLM,
459
+ transformers.AutoModelForSeq2SeqLM,
460
+ ]
461
+ return None
462
+
463
+ def _get_config(
464
+ self,
465
+ pretrained: str,
466
+ revision: str = "main",
467
+ trust_remote_code: bool = False,
468
+ ) -> None:
469
+ self._config = transformers.AutoConfig.from_pretrained(
470
+ pretrained,
471
+ revision=revision,
472
+ trust_remote_code=trust_remote_code,
473
+ )
474
+
475
+ def _create_model(
476
+ self,
477
+ pretrained: str,
478
+ revision: Optional[str] = "main",
479
+ dtype: Optional[Union[str, torch.dtype]] = "auto",
480
+ trust_remote_code: Optional[bool] = False,
481
+ # arguments used for splitting a model across GPUs naively.
482
+ # only used if `parallelize=True`.
483
+ # (accelerate naive PP (device_map) options)
484
+ parallelize: Optional[bool] = False,
485
+ device_map_option: Optional[str] = "auto",
486
+ max_memory_per_gpu: Optional[Union[int, str]] = None,
487
+ max_cpu_memory: Optional[Union[int, str]] = None,
488
+ offload_folder: Optional[str] = "./offload",
489
+ # PEFT and quantization options
490
+ peft: Optional[str] = None,
491
+ autogptq: Optional[Union[bool, str]] = False,
492
+ **kwargs,
493
+ ) -> None:
494
+ """
495
+ Initializes an HF or HF-compatible PreTrainedModel from scratch
496
+ inside HFLM, using the kwargs passed into self.__init__().
497
+
498
+ Also handles functionality such as AutoGPTQ usage and PEFT wrapping.
499
+
500
+ For future similar extensions to AutoGPTQ that are not core to HF's ecosystem,
501
+ (such as PyTorch models that are nearly, but not quite, fully mirroring
502
+ HF's public interface relied on in this HFLM class)
503
+ please consider subclassing HFLM and overriding this and other methods as needed.
504
+ """
505
+
506
+ model_kwargs = kwargs if kwargs else {}
507
+
508
+ if parallelize:
509
+ model_kwargs.update(
510
+ _get_accelerate_args(
511
+ device_map_option, # TODO: phase out device_map_option?
512
+ max_memory_per_gpu,
513
+ max_cpu_memory,
514
+ offload_folder,
515
+ )
516
+ )
517
+ elif "device_map" not in model_kwargs:
518
+ # set a device_map to initialize model on the right GPU.
519
+ # this is needed because it seems that the default behavior
520
+ # for quantized models now seems to be device_map="auto"
521
+ # which breaks data-parallel mode.
522
+ if hasattr(self, "accelerator"):
523
+ model_kwargs.update(
524
+ {"device_map": {"": f"cuda:{self.accelerator.local_process_index}"}}
525
+ )
526
+ else:
527
+ model_kwargs.update({"device_map": {"": str(self.device)}})
528
+
529
+ if not autogptq:
530
+ if model_kwargs.get("load_in_4bit", None):
531
+ assert (
532
+ transformers.__version__ >= "4.30.0"
533
+ ), "load_in_4bit requires transformers >= 4.30.0"
534
+ if transformers.__version__ >= "4.30.0":
535
+ if model_kwargs.get("load_in_4bit", None):
536
+ if model_kwargs.get("bnb_4bit_compute_dtype", None):
537
+ model_kwargs["bnb_4bit_compute_dtype"] = get_dtype(
538
+ model_kwargs["bnb_4bit_compute_dtype"]
539
+ )
540
+ self._model = self.AUTO_MODEL_CLASS.from_pretrained(
541
+ pretrained,
542
+ revision=revision,
543
+ torch_dtype=get_dtype(dtype),
544
+ trust_remote_code=trust_remote_code,
545
+ **model_kwargs,
546
+ )
547
+ else:
548
+ try:
549
+ from auto_gptq import AutoGPTQForCausalLM
550
+ except ModuleNotFoundError:
551
+ raise Exception(
552
+ "Tried to load auto_gptq, but auto-gptq is not installed ",
553
+ "please install auto-gptq via pip install lm-eval[gptq] or pip install -e .[gptq]",
554
+ )
555
+
556
+ self._model = AutoGPTQForCausalLM.from_quantized(
557
+ pretrained,
558
+ trust_remote_code=trust_remote_code,
559
+ model_basename=None if autogptq is True else Path(autogptq).stem,
560
+ use_safetensors=True
561
+ if autogptq is True
562
+ else autogptq.endswith(".safetensors"),
563
+ **model_kwargs,
564
+ )
565
+
566
+ if peft:
567
+ if model_kwargs.get("load_in_4bit", None):
568
+ if version.parse(PEFT_VERSION) < version.parse("0.4.0"):
569
+ raise AssertionError("load_in_4bit requires peft >= 0.4.0")
570
+ self._model = PeftModel.from_pretrained(
571
+ self._model, peft, revision=revision
572
+ )
573
+
574
+ return None
575
+
576
+ def _create_tokenizer(
577
+ self,
578
+ pretrained: Union[str, transformers.PreTrainedModel],
579
+ tokenizer: Optional[
580
+ Union[
581
+ str,
582
+ transformers.PreTrainedTokenizer,
583
+ transformers.PreTrainedTokenizerFast,
584
+ ]
585
+ ],
586
+ revision: Optional[str] = "main",
587
+ trust_remote_code: Optional[bool] = False,
588
+ use_fast_tokenizer: Optional[bool] = True,
589
+ ) -> None:
590
+ """
591
+ Helper method during initialization.
592
+
593
+ Create a tokenizer object corresponding to the correct
594
+ tokenizer for value of `pretrained`, or use the pre-initialized tokenizer passed.
595
+ """
596
+
597
+ if tokenizer:
598
+ if isinstance(tokenizer, str):
599
+ self.tokenizer = transformers.AutoTokenizer.from_pretrained(
600
+ tokenizer,
601
+ revision=revision,
602
+ trust_remote_code=trust_remote_code,
603
+ use_fast=use_fast_tokenizer,
604
+ )
605
+ else:
606
+ assert isinstance(
607
+ tokenizer, transformers.PreTrainedTokenizer
608
+ ) or isinstance(tokenizer, transformers.PreTrainedTokenizerFast)
609
+ self.tokenizer = tokenizer
610
+ else:
611
+ # Get tokenizer based on 'pretrained'
612
+ if isinstance(pretrained, str):
613
+ model_name = pretrained
614
+ else:
615
+ # get the HF hub name via accessor on model
616
+ model_name = self.model.name_or_path
617
+ self.tokenizer = transformers.AutoTokenizer.from_pretrained(
618
+ model_name,
619
+ revision=revision,
620
+ trust_remote_code=trust_remote_code,
621
+ use_fast=use_fast_tokenizer,
622
+ )
623
+ return None
624
+
625
+ def _detect_batch_size(self, requests=None, pos: int = 0):
626
+ if requests:
627
+ _, context_enc, continuation_enc = requests[pos]
628
+ max_length = len(
629
+ (context_enc + continuation_enc)[-(self.max_length + 1) :][:-1]
630
+ )
631
+ max_context_enc = len(context_enc[-(self.max_length + 1) :])
632
+ max_cont_enc = len(continuation_enc[-(self.max_length + 1) :])
633
+ else:
634
+ max_length = self.max_length
635
+
636
+ # if OOM, then halves batch_size and tries again
637
+ @find_executable_batch_size(starting_batch_size=self.max_batch_size)
638
+ def forward_batch(batch_size):
639
+ if self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
640
+ length = max(max_context_enc, max_cont_enc)
641
+ batched_conts = torch.ones(
642
+ (batch_size, length), device=self.device
643
+ ).long()
644
+ test_batch = torch.ones((batch_size, length), device=self.device).long()
645
+ call_kwargs = {
646
+ "attn_mask": test_batch,
647
+ "labels": batched_conts,
648
+ }
649
+ else:
650
+ call_kwargs = {}
651
+ test_batch = torch.ones(
652
+ (batch_size, max_length), device=self.device
653
+ ).long()
654
+ for _ in range(5):
655
+ out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1) # noqa: F841
656
+
657
+ return batch_size
658
+
659
+ try:
660
+ batch_size = forward_batch()
661
+ except RuntimeError as e:
662
+ if "No executable batch size found" in str(e):
663
+ batch_size = 1
664
+ else:
665
+ raise
666
+
667
+ if self.world_size > 1:
668
+ # if multi-GPU, always take minimum over all selected batch sizes
669
+ max_rnk_bs = torch.tensor([batch_size], device=self.device)
670
+ gathered = (
671
+ self.accelerator.gather(max_rnk_bs).cpu().detach().numpy().tolist()
672
+ )
673
+ batch_size = min(gathered)
674
+ clear_torch_cache()
675
+ return batch_size
676
+
677
+ clear_torch_cache()
678
+ return batch_size
679
+
680
+ def tok_encode(
681
+ self, string: str, left_truncate_len=None, add_special_tokens=None
682
+ ) -> List[int]:
683
+ """ """
684
+ # if add_special_tokens is None, fall back to an empty dict and let the
+ # tokenizer's predefined defaults apply (used for all models except CausalLM,
+ # or when an explicit value is passed below)
+ special_tokens_kwargs = {}
+
+ # by default for CausalLM: False, unless self.add_bos_token is set
689
+ if add_special_tokens is None:
690
+ if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
691
+ special_tokens_kwargs = {
692
+ "add_special_tokens": False or self.add_bos_token
693
+ }
694
+ # otherwise the method explicitly defines the value
695
+ else:
696
+ special_tokens_kwargs = {"add_special_tokens": add_special_tokens}
697
+
698
+ encoding = self.tokenizer.encode(string, **special_tokens_kwargs)
699
+
700
+ # left-truncate the encoded context to be at most `left_truncate_len` tokens long
701
+ if left_truncate_len:
702
+ encoding = encoding[-left_truncate_len:]
703
+
704
+ return encoding
705
+
706
+ def tok_batch_encode(
707
+ self,
708
+ strings: List[str],
709
+ padding_side: str = "left",
710
+ left_truncate_len: int = None,
711
+ truncation: bool = False,
712
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
713
+ # encode a batch of strings. converts to tensors and pads automatically, unlike tok_encode.
714
+ old_padding_side = self.tokenizer.padding_side
715
+ self.tokenizer.padding_side = padding_side
716
+
717
+ add_special_tokens = {}
718
+ if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
719
+ add_special_tokens = {"add_special_tokens": False or self.add_bos_token}
720
+
721
+ encoding = self.tokenizer(
722
+ strings,
723
+ truncation=truncation,
724
+ padding="longest",
725
+ return_tensors="pt",
726
+ **add_special_tokens,
727
+ )
728
+ if left_truncate_len:
729
+ encoding["input_ids"] = encoding["input_ids"][:, -left_truncate_len:]
730
+ encoding["attention_mask"] = encoding["attention_mask"][
731
+ :, -left_truncate_len:
732
+ ]
733
+ self.tokenizer.padding_side = old_padding_side
734
+
735
+ return encoding["input_ids"], encoding["attention_mask"]
736
+
737
+ def tok_decode(self, tokens, skip_special_tokens=True):
738
+ return self.tokenizer.decode(tokens, skip_special_tokens=skip_special_tokens)
739
+
740
+ def _model_call(self, inps, attn_mask=None, labels=None):
741
+ """
742
+ :param inps: torch.Tensor
743
+ A torch tensor of shape [batch, (sequence_ctx + sequence_cont)] or of shape
744
+ [batch, sequence_ctx]. the size of sequence may vary from call to call
745
+ :param attn_mask: torch.Tensor, optional
746
+ A torch tensor of shape [batch, (sequence_ctx + sequence_cont)]. Only passed
747
+ (and must be passed) if self.AUTO_MODEL_CLASS is transformers.AutoModelForSeq2SeqLM
748
+ :param labels: torch.Tensor, optional
749
+ A torch tensor of shape [batch, (sequence_ctx + sequence_cont)]. Only passed
750
+ (and must be passed) if self.AUTO_MODEL_CLASS is transformers.AutoModelForSeq2SeqLM
751
+ :return
752
+ A torch tensor of shape [batch, sequence, vocab] with the
753
+ logits returned from the model's decoder
754
+ """
755
+ with torch.no_grad():
756
+ if attn_mask is not None or labels is not None:
757
+ assert attn_mask is not None and labels is not None
758
+ assert self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM
759
+ return self.model(
760
+ input_ids=inps, attention_mask=attn_mask, labels=labels
761
+ ).logits
762
+ else:
763
+ assert self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM
764
+ return self.model(inps).logits
765
+
766
+ def _model_generate(self, context, max_length, stop, **generation_kwargs):
767
+ # temperature = 0.0 if not set
768
+ # if do_sample is false and temp==0.0:
769
+ # remove temperature, as do_sample=False takes care of this
770
+ # and we don't want a warning from HF
771
+ generation_kwargs["temperature"] = generation_kwargs.get("temperature", 0.0)
772
+ do_sample = generation_kwargs.get("do_sample", None)
773
+
774
+ # The temperature has to be a strictly positive float -- if it is 0.0, use greedy decoding strategies
775
+ if generation_kwargs.get("temperature") == 0.0 and do_sample is None:
776
+ generation_kwargs["do_sample"] = do_sample = False
777
+
778
+ if do_sample is False and generation_kwargs.get("temperature") == 0.0:
779
+ generation_kwargs.pop("temperature")
780
+ # build stopping criteria
781
+ stopping_criteria = stop_sequences_criteria(
782
+ self.tokenizer, stop, context.shape[1], context.shape[0]
783
+ )
784
+ return self.model.generate(
785
+ input_ids=context,
786
+ max_length=max_length,
787
+ stopping_criteria=stopping_criteria,
788
+ pad_token_id=self.tokenizer.pad_token_id,
789
+ use_cache=True,
790
+ **generation_kwargs,
791
+ )
792
+
793
+ def _select_cont_toks(
794
+ self, logits: torch.Tensor, contlen: int = None, inplen: int = None
795
+ ) -> torch.Tensor:
796
+ if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
797
+ assert (
798
+ contlen and inplen
799
+ ), "Must pass input len and cont. len to select scored logits for causal LM"
800
+ # discard right-padding.
801
+ # also discard the input/context tokens. we'll only score continuations.
802
+ logits = logits[inplen - contlen : inplen]
803
+ elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
804
+ assert (
805
+ contlen and not inplen
806
+ ), "Selecting scored logits for Seq2SeqLM requires only cont. len"
807
+ # only discard right-padding.
808
+ # the logits input to this fn only contain decoder-side tokens.
809
+ logits = logits[:contlen]
810
+
811
+ return logits
812
+
813
+ def loglikelihood_rolling(
814
+ self, requests: List[Instance], disable_tqdm: bool = False
815
+ ) -> List[float]:
816
+ loglikelihoods = []
817
+
818
+ adaptive_batch_size = None
819
+ if self.batch_size == "auto":
820
+ # using rolling window with maximum context
821
+ print("Passed argument batch_size = auto. Detecting largest batch size")
822
+ batch_size = self._detect_batch_size()
823
+ print(f"Determined Largest batch size: {batch_size}")
824
+ adaptive_batch_size = batch_size
825
+
826
+ for (string,) in tqdm(
827
+ [req.args for req in requests], disable=(disable_tqdm or (self.rank != 0))
828
+ ):
829
+ rolling_token_windows = list(
830
+ map(
831
+ utils.make_disjoint_window,
832
+ utils.get_rolling_token_windows(
833
+ token_list=self.tok_encode(string),
834
+ prefix_token=self.prefix_token_id,
835
+ max_seq_len=self.max_length,
836
+ context_len=1,
837
+ ),
838
+ )
839
+ )
840
+
841
+ # TODO: Right now, we pass single EOT token to the Encoder and the full context to the decoder, in seq2seq case
842
+ rolling_token_windows = [(None,) + x for x in rolling_token_windows]
843
+
844
+ pad_amnt = 0
845
+ if self.world_size > 1:
846
+ # We pad out the external document-level iterator so the inner iterator doesn't hang
847
+ mytensor = torch.tensor(len(rolling_token_windows), device=self.device)
848
+ gathered = (
849
+ self.accelerator.gather(mytensor).cpu().detach().numpy().tolist()
850
+ )
851
+
852
+ pad_amnt = max(gathered) - gathered[self.rank]
853
+ if pad_amnt > 0:
854
+ rolling_token_windows += pad_amnt * [rolling_token_windows[0]]
855
+
856
+ string_nll = self._loglikelihood_tokens(
857
+ requests=rolling_token_windows,
858
+ disable_tqdm=True,
859
+ override_bs=adaptive_batch_size,
860
+ )
861
+
862
+ if (self.world_size > 1) and (pad_amnt > 0):
863
+ string_nll = [x[0] for x in string_nll[:-pad_amnt]]
864
+ else:
865
+ # discard is_greedy
866
+ string_nll = [x[0] for x in string_nll]
867
+
868
+ string_nll = sum(string_nll)
869
+ loglikelihoods.append(string_nll)
870
+
871
+ return loglikelihoods
872
+
873
+ def _batch_scheduler(self, pos, n_reordered_requests):
874
+ sched = pos // int(len(n_reordered_requests) / self.batch_schedule)
875
+ if sched in self.batch_sizes:
876
+ return self.batch_sizes[sched]
877
+ if (len(self.batch_sizes) > 1) and (
878
+ self.batch_sizes[sched - 1] == self.max_batch_size
879
+ ):
880
+ # if previous batch size is already maximal, skip recomputation
881
+ self.batch_sizes[sched] = self.max_batch_size
882
+ return self.batch_sizes[sched]
883
+ print(
884
+ f"Passed argument batch_size = auto:{self.batch_schedule}. Detecting largest batch size"
885
+ )
886
+ self.batch_sizes[sched] = self._detect_batch_size(n_reordered_requests, pos)
887
+ print(f"Determined largest batch size: {self.batch_sizes[sched]}")
888
+ return self.batch_sizes[sched]
889
+
890
+ def _loglikelihood_tokens(
891
+ self,
892
+ requests: List[Tuple[Tuple[str, str], List[int], List[int]]],
893
+ disable_tqdm: bool = False,
894
+ override_bs: int = None,
895
+ ) -> List[Tuple[float, bool]]:
896
+ # TODO: implement some kind of efficient-request-middleware that lumps together requests with the same context
897
+ res = []
898
+
899
+ def _collate(req: Tuple[Tuple[str, str], List[int], List[int]]):
900
+ """Defines the key for the sorted method"""
901
+ # the negative sign on len(toks) sorts descending - this has a few advantages:
902
+ # - time estimates will always be over not underestimates, which is more useful for planning
903
+ # - to know the size of a batch when going through the list, you know the first one is always the batch
904
+ # padded context length. this is useful to simplify the batching logic and more importantly to make
905
+ # automatic adaptive batches much much easier to implement
906
+ # - any OOMs will happen right away rather than near the end
907
+
908
+ toks = req[1] + req[2]
909
+ return -len(toks), tuple(toks)
910
+
911
+ def _lookup_one_token_cont(req: Tuple[Tuple[str, str], List[int], List[int]]):
912
+ """Defines the key to group and lookup one-token continuations"""
913
+ # Use with group_by="contexts" (optional)
914
+ # allows for the creation of a lookup, so we can reuse logits in case of one-token continuations.
915
+ # speeds up some multiple-choice tasks proportionally to the number of choices.
916
+ # groups requests by context+continuation[:-1] and infer on one request/group.
917
+ return req[-2] + req[-1][:-1]
918
+
919
+ re_ord = Collator(
920
+ requests,
921
+ sort_fn=_collate,
922
+ group_by="contexts"
923
+ if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM
924
+ and self.logits_cache
925
+ else None,
926
+ group_fn=_lookup_one_token_cont,
927
+ )
928
+
929
+ # automatic (variable) batch size detection for vectorization
930
+ # pull longest context sample from request
931
+ n_reordered_requests = len(re_ord)
932
+ batch_size = (
933
+ self.batch_size
934
+ if self.batch_size != "auto"
935
+ else override_bs
936
+ if override_bs is not None
937
+ else 0
938
+ )
939
+ batch_fn = (
940
+ self._batch_scheduler
941
+ if self.batch_size == "auto"
942
+ and n_reordered_requests > 0
943
+ and not override_bs
944
+ else None
945
+ )
946
+
947
+ chunks = re_ord.get_batched(n=batch_size, batch_fn=batch_fn)
948
+ pbar = tqdm(
949
+ total=len(requests),
950
+ disable=(disable_tqdm or (self.rank != 0)),
951
+ desc="Running loglikelihood requests",
952
+ )
953
+ for chunk in chunks:
954
+ inps = []
955
+ cont_toks_list = []
956
+ inplens = []
957
+
958
+ conts = []
959
+ encoder_attns = []
960
+
961
+ padding_len_inp = None
962
+ padding_len_cont = None
963
+ # because vectorizing is annoying, we first convert each (context, continuation) pair to padded
964
+ # tensors, then we pack them together into a batch, call the model, and then pick it all apart
965
+ # again because vectorizing is annoying
966
+
967
+ for _, context_enc, continuation_enc in chunk:
968
+ # sanity check
969
+ assert len(context_enc) > 0
970
+ assert len(continuation_enc) > 0
971
+ assert len(continuation_enc) <= self.max_length
972
+
973
+ # how this all works (illustrated on a causal decoder-only setup):
974
+ # CTX CONT
975
+ # inp 0 1 2 3|4 5 6 7 8 9 <- last token is deleted by inp[:, :-1]
976
+ # model \ \
977
+ # logits 1 2 3|4 5 6 7 8 9 <- the ctx half gets tossed out by the
978
+ # cont_toks 4 5 6 7 8 9 [:, -len(continuation_enc):, :self.vocab_size] slice
979
+
980
+ # when too long to fit in context, truncate from the left
981
+ if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
982
+ inp = torch.tensor(
983
+ (context_enc + continuation_enc)[-(self.max_length + 1) :][:-1],
984
+ dtype=torch.long,
985
+ device=self.device,
986
+ )
987
+ (inplen,) = inp.shape
988
+ elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
989
+ inp = torch.tensor(
990
+ (context_enc)[-self.max_length :],
991
+ dtype=torch.long,
992
+ device=self.device,
993
+ )
994
+ (inplen,) = inp.shape
995
+
996
+ # build encoder attn masks
997
+ encoder_attns.append(torch.ones_like(inp))
998
+
999
+ cont = torch.tensor(
1000
+ (continuation_enc)[-self.max_length :],
1001
+ # TODO: left-shift these?
1002
+ # TODO: our code assumes we never end up truncating conts for either model type
1003
+ dtype=torch.long,
1004
+ device=self.device,
1005
+ )
1006
+ (contlen,) = cont.shape
1007
+
1008
+ conts.append(cont)
1009
+
1010
+ padding_len_cont = (
1011
+ max(padding_len_cont, contlen)
1012
+ if padding_len_cont is not None
1013
+ else contlen
1014
+ )
1015
+
1016
+ padding_len_inp = (
1017
+ max(padding_len_inp, inplen)
1018
+ if padding_len_inp is not None
1019
+ else inplen
1020
+ )
1021
+
1022
+ inps.append(inp) # [1, inp_length]
1023
+ cont_toks_list.append(continuation_enc)
1024
+ inplens.append(inplen)
1025
+
1026
+ # create encoder attn mask and batched conts, if seq2seq
1027
+ call_kwargs = {}
1028
+ if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
1029
+ batched_inps = pad_and_concat(
1030
+ padding_len_inp, inps, padding_side="right"
1031
+ ) # [batch, padding_len_inp]
1032
+ elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
1033
+ # TODO: left-pad encoder inps and mask?
1034
+ batched_inps = pad_and_concat(
1035
+ padding_len_inp, inps
1036
+ ) # [batch, padding_len_inp]
1037
+ batched_conts = pad_and_concat(
1038
+ padding_len_cont, conts
1039
+ ) # [batch, padding_len_cont]
1040
+ batched_encoder_mask = pad_and_concat(
1041
+ padding_len_inp, encoder_attns
1042
+ ) # [batch, padding_len_inp]
1043
+ call_kwargs = {
1044
+ "attn_mask": batched_encoder_mask,
1045
+ "labels": batched_conts,
1046
+ }
1047
+
1048
+ multi_logits = F.log_softmax(
1049
+ self._model_call(batched_inps, **call_kwargs), dim=-1
1050
+ ) # [batch, padding_length (inp or cont), vocab]
1051
+
1052
+ for (request_str, ctx_tokens, _), logits, inplen, cont_toks in zip(
1053
+ chunk, multi_logits, inplens, cont_toks_list
1054
+ ):
1055
+ # Slice to original seq length
1056
+ contlen = len(cont_toks)
1057
+ # take only logits in the continuation
1058
+ # (discard context toks if decoder-only ; discard right-padding)
1059
+ # also discards + checks for "virtual tokens" in the causal LM's input window
1060
+ # from prompt/prefix tuning tokens, if applicable
1061
+ ctx_len = (
1062
+ inplen + (logits.shape[0] - padding_len_inp)
1063
+ if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM
1064
+ else None
1065
+ )
1066
+ logits = self._select_cont_toks(logits, contlen=contlen, inplen=ctx_len)
1067
+ logits = logits.unsqueeze(0) # [1, seq, vocab]
1068
+
1069
+ # Check if per-token argmax is exactly equal to continuation
1070
+ greedy_tokens = logits.argmax(dim=-1)
1071
+
1072
+ # check for one-token continuation cache hits.
1073
+ # noop in case group_by != "contexts" or no cache hit and returns the
1074
+ # original args. Otherwise, expands the logits batch dimension and yields each
1075
+ # batch along with matching continuation tokens and prompt strings.
1076
+ # logits -> [1, seq, vocab]
1077
+ for request_str, cont_toks, logits in re_ord.get_cache(
1078
+ req_str=request_str,
1079
+ cxt_toks=ctx_tokens,
1080
+ cont_toks=cont_toks,
1081
+ logits=logits,
1082
+ ):
1083
+ cont_toks = torch.tensor(
1084
+ cont_toks, dtype=torch.long, device=self.device
1085
+ ).unsqueeze(0) # [1, seq]
1086
+ max_equal = (greedy_tokens == cont_toks).all()
1087
+
1088
+ # Obtain log-probs at the corresponding continuation token indices
1089
+ # last_token_slice = logits[:, -1, :].squeeze(0).tolist()
1090
+ logits = torch.gather(logits, 2, cont_toks.unsqueeze(-1)).squeeze(
1091
+ -1
1092
+ ) # [1, seq]
1093
+
1094
+ # Answer: (log prob, is-exact-match)
1095
+ answer = (float(logits.sum()), bool(max_equal))
1096
+
1097
+ res.append(answer)
1098
+
1099
+ self.cache_hook.add_partial("loglikelihood", request_str, answer)
1100
+ pbar.update(1)
1101
+
1102
+ pbar.close()
1103
+
1104
+ return re_ord.get_original(res)
1105
+
1106
+ def generate_until(
1107
+ self, requests: List[Instance], disable_tqdm: bool = False
1108
+ ) -> List[str]:
1109
+ res = []
1110
+
1111
+ def _collate(req: Tuple[str, dict]):
1112
+ """Defines the key for the sorted method"""
1113
+ # the negative sign on len(toks) sorts descending - this has a few advantages:
1114
+ # - time estimates will always be over not underestimates, which is more useful for planning
1115
+ # - to know the size of a batch when going through the list, you know the first one is always the batch
1116
+ # padded context length. this is useful to simplify the batching logic and more importantly to make
1117
+ # automatic adaptive batches much much easier to implement
1118
+ # - any OOMs will happen right away rather than near the end
1119
+ toks = self.tok_encode(req[0])
1120
+ return -len(toks), req[0]
1121
+
1122
+ pbar = tqdm(
1123
+ total=len(requests),
1124
+ disable=(disable_tqdm or (self.rank != 0)),
1125
+ desc="Running generate_until requests",
1126
+ )
1127
+ adaptive_batch_size = None
1128
+ if self.batch_size == "auto":
1129
+ # using rolling window with maximum context
1130
+ print("Passed argument batch_size = auto. Detecting largest batch size")
1131
+ batch_size = self._detect_batch_size()
1132
+ print(f"Determined Largest batch size: {batch_size}")
1133
+ adaptive_batch_size = batch_size
1134
+ # for each different set of kwargs, we execute all requests, by batch.
1135
+ batch_size = (
1136
+ self.batch_size
1137
+ if self.batch_size != "auto"
1138
+ else adaptive_batch_size
1139
+ if adaptive_batch_size is not None
1140
+ else 0
1141
+ )
1142
+ batch_fn = (
1143
+ self._batch_scheduler
1144
+ if self.batch_size == "auto" and not adaptive_batch_size
1145
+ else None
1146
+ )
1147
+
1148
+ # we group requests by their generation_kwargs,
1149
+ # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
1150
+ # in the same batch.
1151
+ # group_fn=lambda x: x[1] -> x=(context, gen_kwargs)
1152
+ re_ords = Collator(
1153
+ [reg.args for reg in requests],
1154
+ sort_fn=_collate,
1155
+ group_by="gen_kwargs",
1156
+ group_fn=lambda x: x[1],
1157
+ )
1158
+ chunks = re_ords.get_batched(n=batch_size, batch_fn=batch_fn)
1159
+ for chunk in chunks:
1160
+ contexts, all_gen_kwargs = zip(*chunk)
1161
+ # we assume all gen kwargs in the batch are the same
1162
+ # this is safe to assume because the `grouper` object ensures it.
1163
+ gen_kwargs = all_gen_kwargs[0]
1164
+ # unpack our keyword arguments.
1165
+ until = None
1166
+ if isinstance(gen_kwargs, dict):
1167
+ kwargs = copy.deepcopy(gen_kwargs) # edge case for repeats > 1
1168
+ if "until" in kwargs.keys():
1169
+ until = kwargs.pop("until")
1170
+ if isinstance(until, str):
1171
+ until = [until]
1172
+ elif not isinstance(until, list):
1173
+ raise ValueError(
1174
+ f"Expected `kwargs['until']` to be of type Union[str,list] but got {until}"
1175
+ )
1176
+ else:
1177
+ raise ValueError(
1178
+ f"Expected `kwargs` to be of type `dict` but got {type(gen_kwargs)}"
1179
+ )
1180
+ # add EOS token to stop sequences
1181
+ eos = self.tok_decode(self.eot_token_id, skip_special_tokens=False)
1182
+ if not until:
1183
+ until = [eos]
1184
+ else:
1185
+ until.append(eos)
1186
+ if "max_gen_toks" in kwargs.keys():
1187
+ max_gen_toks = kwargs.pop("max_gen_toks")
1188
+ else:
1189
+ max_gen_toks = self.max_gen_toks
1190
+
1191
+ # set the max length in tokens of inputs ("context_enc")
1192
+ if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
1193
+ # max len for inputs = max length, minus room to generate the max new tokens
1194
+ max_ctx_len = self.max_length - max_gen_toks
1195
+ elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM:
1196
+ # max len for inputs = encoder's whole max_length
1197
+ max_ctx_len = self.max_length
1198
+
1199
+ # encode, pad, and truncate contexts for this batch
1200
+ context_enc, attn_masks = self.tok_batch_encode(
1201
+ contexts,
1202
+ left_truncate_len=max_ctx_len,
1203
+ truncation=self.truncation,
1204
+ )
1205
+ context_enc = context_enc.to(self.device)
1206
+ attn_masks = attn_masks.to(self.device)
1207
+
1208
+ if "max_length" not in kwargs:
1209
+ kwargs["max_length"] = context_enc.shape[1] + max_gen_toks
1210
+
1211
+ # perform batched generation
1212
+ cont = self._model_generate(
1213
+ context=context_enc,
1214
+ attention_mask=attn_masks,
1215
+ stop=until,
1216
+ **kwargs,
1217
+ )
1218
+
1219
+ cont_toks_list = cont.tolist()
1220
+ for cont_toks, context in zip(cont_toks_list, contexts):
1221
+ # discard context + left-padding toks if using causal decoder-only LM
1222
+ if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM:
1223
+ cont_toks = cont_toks[context_enc.shape[1] :]
1224
+
1225
+ s = self.tok_decode(cont_toks)
1226
+
1227
+ # use secondary stop seqs to cut off should-have-been-stopped content post-hoc
1228
+ for term in until:
1229
+ if len(term) > 0:
1230
+ # ignore '' separator,
1231
+ # for seq2seq case where self.tok_decode(self.eot_token_id) = ''
1232
+ s = s.split(term)[0]
1233
+
1234
+ res.append(s)
1235
+
1236
+ self.cache_hook.add_partial("generate_until", (context, gen_kwargs), s)
1237
+ pbar.update(1)
1238
+ # reorder this group of results back to original unsorted form
1239
+ res = re_ords.get_original(res)
1240
+
1241
+ pbar.close()
1242
+
1243
+ return res
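
For orientation, here is a minimal sketch (not part of this commit) of how the HFLM class above is typically driven. It assumes the harness's top-level `simple_evaluate` entry point and the `lambada_openai` task are available in this install, and the checkpoint name is only an example:

    import lm_eval
    from lm_eval.models.huggingface import HFLM

    # batch_size="auto" exercises the _detect_batch_size() path shown above
    lm = HFLM(pretrained="EleutherAI/pythia-160m", batch_size="auto")

    # simple_evaluate accepts either a registered model name or an LM instance
    results = lm_eval.simple_evaluate(model=lm, tasks=["lambada_openai"])
    print(results["results"])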
lm-evaluation/lm_eval/models/mamba_lm.py ADDED
@@ -0,0 +1,126 @@
+ from typing import Optional, Union
2
+
3
+ import torch
4
+
5
+ import lm_eval.models.utils
6
+ from lm_eval.api.registry import register_model
7
+ from lm_eval.models.huggingface import HFLM
8
+
9
+
10
+ @register_model("mamba_ssm")
11
+ class MambaLMWrapper(HFLM):
12
+ def __init__(
13
+ self,
14
+ pretrained="state-spaces/mamba-130m",
15
+ **kwargs,
16
+ ) -> None:
17
+ """
18
+ Mamba (via the `mamba_ssm` package) supports the following args:
19
+ ```
20
+ d_model: int,
21
+ n_layer: int,
22
+ vocab_size: int,
23
+ initializer_cfg=None,
24
+ pad_vocab_size_multiple: int = 1,
25
+ ssm_cfg=None,
26
+ norm_epsilon: float = 1e-5,
27
+ rms_norm: bool = False,
28
+ fused_add_norm=False,
30
+ residual_in_fp32=False,
31
+ ```
32
+
33
+ See https://github.com/state-spaces/mamba/blob/main/mamba_ssm/models/mixer_seq_simple.py#L175 for more info.
34
+ The above can all be passed via `--model_args` or to this __init__() directly
35
+ but we recommend placing many of these within the config.json file uploaded alongside your
36
+ Mamba model to the HF Hub instead.
37
+ All other HuggingFace from_pretrained() kwargs
38
+ such as those related to
39
+ `parallelize=True`, PEFT, autoGPTQ,
40
+ or any sub-configurations of these advanced args,
41
+ are unsupported by the `mamba_ssm` package.
42
+
43
+ The HFLM arguments
44
+
45
+ `backend`, `tokenizer`, `truncation`, `max_length`,
46
+ `device`, `dtype`, `batch_size`, `max_batch_size`, `trust_remote_code`, `use_fast_tokenizer`
47
+
48
+ Are all supported by Mamba where they do not conflict
49
+ with Mamba-specific restrictions such as causal LMs only.
50
+ """
51
+
52
+ if "backend" in kwargs:
53
+ # mamba currently only supports causal models
54
+ assert kwargs["backend"] == "causal"
55
+
56
+ super().__init__(
57
+ pretrained=pretrained,
58
+ # set appropriate defaults for tokenizer, max length, etc
59
+ backend=kwargs.pop("backend", "causal"),
60
+ tokenizer=kwargs.pop("tokenizer", "EleutherAI/gpt-neox-20b"),
61
+ max_length=kwargs.pop("max_length", 2048),
62
+ **kwargs,
63
+ )
64
+
65
+ def _get_config(
66
+ self,
67
+ pretrained: str,
68
+ **kwargs,
69
+ ) -> None:
70
+ try:
71
+ from mamba_ssm.utils.hf import load_config_hf # noqa: F811
72
+ except ModuleNotFoundError:
73
+ raise Exception(
74
+ "attempted to use 'mamba_ssm' LM type, but package `mamba_ssm` is not installed. \
75
+ please install mamba via `pip install lm-eval[mamba]` or `pip install -e .[mamba]`",
76
+ )
77
+
78
+ self._config = load_config_hf(pretrained)
79
+
80
+ def _create_model(
81
+ self,
82
+ pretrained: str,
83
+ dtype: Optional[Union[str, torch.dtype]] = "float16",
84
+ # no `parallelize=True` options
85
+ # no PEFT and quantization options
86
+ # Mamba does not support arbitrary HF from_pretrained() args
87
+ **kwargs,
88
+ ) -> None:
89
+ try:
90
+ from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel # noqa: F811
91
+ except ModuleNotFoundError:
92
+ raise Exception(
93
+ "attempted to use 'mamba_ssm' LM type, but package `mamba_ssm` is not installed. \
94
+ please install mamba via `pip install lm-eval[mamba]` or `pip install -e .[mamba]`",
95
+ )
96
+
97
+ self._model = MambaLMHeadModel.from_pretrained(
98
+ pretrained,
99
+ device=self._device,
100
+ dtype=torch.float16
101
+ if dtype == "auto"
102
+ else lm_eval.models.utils.get_dtype(dtype),
103
+ )
104
+
105
+ def _model_generate(self, context, max_length, stop, **generation_kwargs):
106
+ for key in ("do_sample", "attention_mask"):
107
+ if key in generation_kwargs:
108
+ generation_kwargs.pop(key)
109
+
110
+ # mamba's custom GenerationMixin currently does not support
111
+ # passing stopping criteria.
112
+ # for the time being, we simply generate to max length,
113
+ # then truncate (equivalent result)
114
+ # -- this should be revisited to speed up generation
115
+ # stopping_criteria = stop_sequences_criteria(
116
+ # self.tokenizer, stop, 1, context.shape[0]
117
+ # )
118
+
119
+ return self.model.generate(
120
+ input_ids=context,
121
+ max_length=max_length,
122
+ # stopping_criteria=stopping_criteria,
123
+ # pad_token_id=self.tokenizer.pad_token_id,
124
+ # use_cache=True,
125
+ **generation_kwargs,
126
+ )
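
As a rough usage sketch (an assumption, not shown in this diff), the "mamba_ssm" model type registered above can be selected through the same harness entry point; the checkpoint, dtype, batch size, and task name here are illustrative only:

    import lm_eval

    results = lm_eval.simple_evaluate(
        model="mamba_ssm",
        model_args="pretrained=state-spaces/mamba-130m,dtype=float16",
        tasks=["lambada_openai"],
        batch_size=8,
    )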
lm-evaluation/lm_eval/models/nemo_lm.py ADDED
@@ -0,0 +1,537 @@
+ # Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import importlib
16
+ import pathlib
17
+ from copy import deepcopy
18
+ from typing import List, Literal
19
+
20
+ import filelock
21
+ import numpy as np
22
+ import torch
23
+ from tqdm import tqdm
24
+
25
+ from lm_eval.api.instance import Instance
26
+ from lm_eval.api.model import LM
27
+ from lm_eval.api.registry import register_model
28
+ from lm_eval.models.utils import Collator
29
+ from lm_eval.utils import (
30
+ eval_logger,
31
+ get_rolling_token_windows,
32
+ make_disjoint_window,
33
+ simple_parse_args_string,
34
+ )
35
+
36
+
37
+ def _patch_pretrained_cfg(
38
+ pretrained_cfg, trainer, tensor_model_parallel_size, pipeline_model_parallel_size
39
+ ):
40
+ try:
41
+ import omegaconf
42
+ except ModuleNotFoundError:
43
+ raise Exception(
44
+ "Attempted to use 'nemo_lm' model type, but package `nemo` is not installed"
45
+ "Please install nemo following the instructions in the README: either with a NVIDIA PyTorch or NeMo container, "
46
+ "or installing nemo following https://github.com/NVIDIA/NeMo.",
47
+ )
48
+
49
+ omegaconf.OmegaConf.set_struct(pretrained_cfg, True)
50
+ with omegaconf.open_dict(pretrained_cfg):
51
+ attributes_to_update = {
52
+ "sequence_parallel": False,
53
+ "activations_checkpoint_granularity": None,
54
+ "activations_checkpoint_method": None,
55
+ "precision": trainer.precision,
56
+ "global_batch_size": None,
57
+ "tensor_model_parallel_size": tensor_model_parallel_size,
58
+ "pipeline_model_parallel_size": pipeline_model_parallel_size,
59
+ "apply_rope_fusion": False,
60
+ }
61
+ for name, value in attributes_to_update.items():
62
+ if hasattr(pretrained_cfg, name):
63
+ pretrained_cfg[name] = value
64
+ return pretrained_cfg
65
+
66
+
67
+ def _get_target_from_class(target_class) -> str:
68
+ return f"{target_class.__module__}.{target_class.__name__}"
69
+
70
+
71
+ def load_model(
72
+ model_path: str,
73
+ trainer,
74
+ tensor_model_parallel_size: int,
75
+ pipeline_model_parallel_size: int,
76
+ ) -> torch.nn.Module:
77
+ try:
78
+ from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import (
79
+ MegatronGPTModel,
80
+ )
81
+ from nemo.collections.nlp.parts.nlp_overrides import NLPSaveRestoreConnector
82
+ except ModuleNotFoundError:
83
+ raise Exception(
84
+ "Attempted to use 'nemo_lm' model type, but package `nemo` is not installed"
85
+ "Please install nemo following the instructions in the README: either with a NVIDIA PyTorch or NeMo container, "
86
+ "or installing nemo following https://github.com/NVIDIA/NeMo.",
87
+ )
88
+ model_path = pathlib.Path(model_path)
89
+
90
+ save_restore_connector = NLPSaveRestoreConnector()
91
+ if model_path.is_dir():
92
+ save_restore_connector.model_extracted_dir = model_path.as_posix()
93
+ pretrained_cfg = save_restore_connector.restore_from(
94
+ None, model_path.as_posix(), return_config=True, trainer=trainer
95
+ )
96
+ if not hasattr(pretrained_cfg, "target"):
97
+ pretrained_cfg["target"] = _get_target_from_class(MegatronGPTModel)
98
+
99
+ pretrained_cfg = _patch_pretrained_cfg(
100
+ pretrained_cfg,
101
+ trainer,
102
+ tensor_model_parallel_size=tensor_model_parallel_size,
103
+ pipeline_model_parallel_size=pipeline_model_parallel_size,
104
+ )
105
+
106
+ model_to_load_path = model_path
107
+ override_config = pretrained_cfg
108
+
109
+ module_name, class_name = override_config.target.rsplit(".", 1)
110
+ model_class = getattr(importlib.import_module(module_name), class_name)
111
+
112
+ # monkeypatch _build_tokenizer method to be process-safe
113
+ tokenizer_lock = filelock.FileLock(f"/tmp/{model_path.name}.tokenizer.lock")
114
+
115
+ def _synced_build_tokenizer(self):
116
+ with tokenizer_lock:
117
+ self._original_build_tokenizer()
118
+
119
+ model_class._original_build_tokenizer = model_class._build_tokenizer
120
+ model_class._build_tokenizer = _synced_build_tokenizer
121
+
122
+ model = model_class.restore_from(
123
+ restore_path=model_to_load_path.as_posix(),
124
+ trainer=trainer,
125
+ override_config_path=override_config,
126
+ save_restore_connector=save_restore_connector,
127
+ map_location=f"cuda:{trainer.local_rank}",
128
+ )
129
+
130
+ model.freeze()
131
+ model.training = False
132
+ try:
133
+ # Have to turn off activations_checkpoint_method for inference
134
+ model.model.language_model.encoder.activations_checkpoint_method = None
135
+ except AttributeError:
136
+ pass
137
+ return model
138
+
139
+
140
+ def setup_distributed_environment(trainer):
141
+ try:
142
+ from nemo.utils.app_state import AppState
143
+ except ModuleNotFoundError:
144
+ raise Exception(
145
+ "Attempted to use 'nemo_lm' model type, but package `nemo` is not installed"
146
+ "Please install nemo following the instructions in the README: either with a NVIDIA PyTorch or NeMo container, "
147
+ "or installing nemo following https://github.com/NVIDIA/NeMo.",
148
+ )
149
+
150
+ def dummy():
151
+ return
152
+
153
+ if trainer.strategy.launcher is not None:
154
+ trainer.strategy.launcher.launch(dummy, trainer=trainer)
155
+ trainer.strategy.setup_environment()
156
+
157
+ app_state = AppState()
158
+
159
+ return app_state
160
+
161
+
162
+ @register_model("nemo_lm")
163
+ class NeMoLM(LM):
164
+ def __init__(
165
+ self,
166
+ path: str,
167
+ max_length: int = 4096,
168
+ batch_size: int = 1,
169
+ max_gen_toks: int = 256,
170
+ devices: int = 1,
171
+ num_nodes: int = 1,
172
+ tensor_model_parallel_size: int = 1,
173
+ pipeline_model_parallel_size: int = 1,
174
+ precision: Literal[
175
+ "16-mixed",
176
+ "bf16-mixed",
177
+ "32-true",
178
+ "64-true",
179
+ 64,
180
+ 32,
181
+ 16,
182
+ "64",
183
+ "32",
184
+ "16",
185
+ "bf16",
186
+ ] = "bf16",
187
+ **kwargs,
188
+ ):
189
+ try:
190
+ from nemo.collections.nlp.modules.common.text_generation_utils import (
191
+ generate,
192
+ )
193
+ from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
194
+ from pytorch_lightning.trainer.trainer import Trainer
195
+
196
+ self.generate = generate
197
+ except ModuleNotFoundError:
198
+ raise Exception(
199
+ "Attempted to use 'nemo_lm' model type, but package `nemo` is not installed"
200
+ "Please install nemo following the instructions in the README: either with a NVIDIA PyTorch or NeMo container, "
201
+ "or installing nemo following https://github.com/NVIDIA/NeMo.",
202
+ )
203
+
204
+ super().__init__()
205
+
206
+ if (
207
+ tensor_model_parallel_size == 1
208
+ and pipeline_model_parallel_size == 1
209
+ and devices > 1
210
+ ):
211
+ eval_logger.info(
212
+ f"The number of data replicas for evaluation is {devices}."
213
+ )
214
+ eval_logger.info(f"The total number of devices is {devices}.")
215
+ eval_logger.info(
216
+ "No tensor parallelism or pipeline parallelism is applied."
217
+ )
218
+
219
+ elif tensor_model_parallel_size * pipeline_model_parallel_size == devices:
220
+ eval_logger.info(
221
+ f"Setting tensor parallelism to {tensor_model_parallel_size} and pipeline parallelism to {pipeline_model_parallel_size}."
222
+ )
223
+ eval_logger.info(f"The total number of devices is {devices}.")
224
+ eval_logger.info("No data parallelism is applied.")
225
+
226
+ else:
227
+ raise ValueError(
228
+ "Please set the product of tensor_model_parallel_size and pipeline_model_parallel_size"
229
+ "equal to the specified number of devices."
230
+ )
231
+
232
+ if num_nodes > 1:
233
+ raise ValueError(
234
+ "A number of nodes greater than 1 is not supported yet. Please set num_nodes as 1."
235
+ )
236
+
237
+ trainer = Trainer(
238
+ strategy=NLPDDPStrategy(),
239
+ devices=devices,
240
+ accelerator="gpu",
241
+ num_nodes=num_nodes,
242
+ precision=precision,
243
+ logger=False,
244
+ enable_checkpointing=False,
245
+ use_distributed_sampler=False,
246
+ )
247
+ # Modify the following flags only for data replication
248
+ if (
249
+ tensor_model_parallel_size == 1
250
+ and pipeline_model_parallel_size == 1
251
+ and devices > 1
252
+ ):
253
+ self._device = torch.device(f"cuda:{trainer.global_rank}")
254
+ self._rank = trainer.global_rank
255
+ self._world_size = trainer.world_size
256
+ self.model = load_model(
257
+ path,
258
+ trainer,
259
+ tensor_model_parallel_size=tensor_model_parallel_size,
260
+ pipeline_model_parallel_size=pipeline_model_parallel_size,
261
+ ).cuda()
262
+ self.tokenizer = self.model.tokenizer
263
+ self.app_state = setup_distributed_environment(trainer)
264
+
265
+ self._max_length = max_length
266
+ self._batch_size = int(batch_size)
267
+ self._max_gen_toks = max_gen_toks
268
+
269
+ @classmethod
270
+ def create_from_arg_string(cls, arg_string, additional_config=None):
271
+ args = simple_parse_args_string(arg_string)
272
+ if additional_config:
273
+ args["batch_size"] = additional_config.get("batch_size", 1)
274
+
275
+ return cls(**args)
276
+
277
+ @property
278
+ def eot_token_id(self):
279
+ try:
280
+ return self.tokenizer.eos_id
281
+ except AttributeError:
282
+ return None
283
+
284
+ @property
285
+ def max_length(self):
286
+ return self._max_length
287
+
288
+ @property
289
+ def max_gen_toks(self):
290
+ return self._max_gen_toks
291
+
292
+ @property
293
+ def batch_size(self):
294
+ return self._batch_size
295
+
296
+ @property
297
+ def device(self):
298
+ return self._device
299
+
300
+ @property
301
+ def rank(self):
302
+ return self._rank
303
+
304
+ @property
305
+ def world_size(self):
306
+ return self._world_size
307
+
308
+ @property
309
+ def accelerator(self):
310
+ return self._Accelerator(self.world_size)
311
+
312
+ class _Accelerator:
313
+ def __init__(self, world_size):
314
+ self.world_size = world_size
315
+
316
+ def wait_for_everyone(self):
317
+ torch.distributed.barrier()
318
+
319
+ def gather(self, local_tensor):
320
+ gathered_tensors = [
321
+ torch.zeros(1, dtype=local_tensor.dtype).cuda()
322
+ for _ in range(self.world_size)
323
+ ]
324
+ torch.distributed.all_gather(gathered_tensors, local_tensor)
325
+ return torch.cat(gathered_tensors)
326
+
327
+ def tok_encode(self, string: str):
328
+ return self.tokenizer.text_to_ids(string)
329
+
330
+ def tok_decode(self, tokens):
331
+ return self.tokenizer.ids_to_text(tokens)
332
+
333
+ def _encode_pair(self, context, continuation):
334
+ n_spaces = len(context) - len(context.rstrip())
335
+ if n_spaces > 0:
336
+ continuation = context[-n_spaces:] + continuation
337
+ context = context[:-n_spaces]
338
+ whole_enc = self.tok_encode(context + continuation)
339
+ context_enc = self.tok_encode(context)
340
+ context_enc_len = len(context_enc)
341
+ continuation_enc = whole_enc[context_enc_len:]
342
+ return context_enc, continuation_enc
343
+
344
+ def loglikelihood(self, requests):
345
+ new_reqs = []
346
+ for context, continuation in [req.args for req in requests]:
347
+ if context == "":
348
+ # end of text as context
349
+ context_enc, continuation_enc = (
350
+ [self.eot_token_id],
351
+ self.tok_encode(continuation),
352
+ )
353
+ else:
354
+ context_enc, continuation_enc = self._encode_pair(context, continuation)
355
+
356
+ new_reqs.append(((context, continuation), context_enc, continuation_enc))
357
+
358
+ return self._loglikelihood_tokens(new_reqs)
359
+
360
+ def loglikelihood_rolling(
361
+ self, requests: List[Instance], disable_tqdm: bool = False
362
+ ) -> List[float]:
363
+ loglikelihoods = []
364
+
365
+ for (string,) in tqdm([req.args for req in requests], disable=disable_tqdm):
366
+ rolling_token_windows = list(
367
+ map(
368
+ make_disjoint_window,
369
+ get_rolling_token_windows(
370
+ token_list=self.tok_encode(string),
371
+ prefix_token=self.eot_token_id,
372
+ max_seq_len=self.max_length - 1,
373
+ context_len=1,
374
+ ),
375
+ )
376
+ )
377
+
378
+ rolling_token_windows = [(None,) + x for x in rolling_token_windows]
379
+
380
+ string_nll = self._loglikelihood_tokens(
381
+ rolling_token_windows,
382
+ )
383
+
384
+ # discard is_greedy
385
+ string_nll = [x[0] for x in string_nll]
386
+
387
+ string_nll = sum(string_nll)
388
+ loglikelihoods.append(string_nll)
389
+ return loglikelihoods
390
+
391
+ def _loglikelihood_tokens(self, requests, disable_tqdm=False):
392
+ res = []
393
+
394
+ def _collate(x):
395
+ toks = x[1] + x[2]
396
+ return -len(toks), tuple(toks)
397
+
398
+ re_ord = Collator(requests, sort_fn=_collate)
399
+ chunks = re_ord.get_batched(n=self.batch_size, batch_fn=None)
400
+ pbar = tqdm(
401
+ total=len(requests),
402
+ disable=(disable_tqdm or (self.rank != 0)),
403
+ desc="Running loglikelihood requests",
404
+ )
405
+ for chunk in chunks:
406
+ inps = []
407
+ ctxlens = []
408
+ contlens = []
409
+
410
+ for _, context_enc, continuation_enc in chunk:
411
+ # Leave one token for generation. Tokens_to_generate = 0 breaks NeMo.
412
+ inp = (context_enc + continuation_enc)[-(self.max_length - 1) :]
413
+
414
+ ctxlen = len(context_enc) - max(
415
+ 0, len(context_enc) + len(continuation_enc) - (self.max_length - 1)
416
+ )
417
+ ctxlens.append(ctxlen)
418
+ contlens.append(len(continuation_enc))
419
+
420
+ inps.append(self.tok_decode(inp))
421
+
422
+ output = self.generate(
423
+ self.model,
424
+ inputs=inps,
425
+ tokens_to_generate=1,
426
+ min_tokens_to_generate=1,
427
+ compute_logprob=True,
428
+ all_probs=True,
429
+ )
430
+
431
+ batch_token_ids = np.asarray(output["token_ids"])[:, :-1]
432
+ batch_logprobs = output["logprob"][:, :-1]
433
+ batch_full_logprob = output["full_logprob"][:, :-1, :]
434
+
435
+ # Compute greedy tokens for entire batch rather than calling it with proper ctxlen for each sample.
436
+ # Additional tokens for each sample will be trimmed later.
437
+ min_ctxlen = min(ctxlens)
438
+
439
+ # Use min_ctxlen-1 instead of min_ctxlen since full_logprobs are not returned for the first token.
440
+ batch_greedy_tokens = (
441
+ torch.argmax(batch_full_logprob[:, min_ctxlen - 1 :, :], -1)
442
+ .cpu()
443
+ .numpy()
444
+ )
445
+
446
+ for token_ids, greedy_tokens, logprobs, ctxlen, contlen, (
447
+ cache_key,
448
+ _,
449
+ _,
450
+ ) in zip(
451
+ batch_token_ids,
452
+ batch_greedy_tokens,
453
+ batch_logprobs,
454
+ ctxlens,
455
+ contlens,
456
+ chunk,
457
+ ):
458
+ # Trim at contlen since shorter contexts in a batch will have more than one token generated.
459
+ # Use ctxlen-1 instead of ctxlen same as for full_logprob in batch_greedy_tokens calculation
460
+ logprobs = (logprobs[ctxlen - 1 :])[:contlen]
461
+ logprob = sum(logprobs).tolist()
462
+
463
+ continuation_tokens = (token_ids[ctxlen:])[:contlen]
464
+ len_diff = ctxlen - min_ctxlen
465
+ is_greedy = continuation_tokens == (greedy_tokens[len_diff:])[:contlen]
466
+ if not isinstance(is_greedy, bool):
467
+ is_greedy = is_greedy.all()
468
+ answer = (logprob, is_greedy)
469
+
470
+ if cache_key is not None:
471
+ self.cache_hook.add_partial("loglikelihood", cache_key, answer)
472
+
473
+ res.append(answer)
474
+ pbar.update(1)
475
+
476
+ pbar.close()
477
+
478
+ return re_ord.get_original(res)
479
+
480
+ def generate_until(self, requests):
481
+ if not requests:
482
+ return []
483
+ res = []
484
+
485
+ def get_until(req_args):
486
+ until = req_args.get("until", [])
487
+ until = deepcopy(until) # prevent from modifying req_args for cache_key
488
+ if self.eot_token_id not in until:
489
+ until.append(self.eot_token_id)
490
+ return until
491
+
492
+ def _collate(x):
493
+ toks = self.tok_encode(x[0])
494
+ return len(toks), x[0]
495
+
496
+ re_ords = Collator(
497
+ [reg.args for reg in requests], sort_fn=_collate, group_by="gen_kwargs"
498
+ )
499
+ chunks = re_ords.get_batched(n=self.batch_size, batch_fn=None)
500
+ for chunk in chunks:
501
+ contexts, all_gen_kwargs = zip(*chunk)
502
+ # we assume all gen kwargs in the batch are the same
503
+ # this is safe to assume because the `grouper` object ensures it.
504
+ req_args = all_gen_kwargs[0]
505
+ # unpack our keyword arguments.
506
+ until = get_until(req_args)
507
+ max_gen_toks = req_args.get("max_gen_toks", self.max_gen_toks)
508
+
509
+ remaining_length = self.max_length - max_gen_toks
510
+ contexts = []
511
+ for context, _ in chunk:
512
+ encoded_context = self.tok_encode(context)
513
+ encoded_context = encoded_context[-remaining_length:]
514
+ contexts.append(self.tok_decode(encoded_context))
515
+
516
+ output = self.generate(
517
+ self.model,
518
+ inputs=contexts,
519
+ tokens_to_generate=max_gen_toks,
520
+ end_strings=until,
521
+ greedy=True,
522
+ )
523
+
524
+ answers = output["sentences"]
525
+
526
+ continuations = []
527
+ for context, answer in zip(contexts, answers):
528
+ continuations.append(answer[len(context) :])
529
+
530
+ for term in until:
531
+ continuations = [answer.split(term)[0] for answer in continuations]
532
+
533
+ for request, answer in zip(chunk, continuations):
534
+ self.cache_hook.add_partial("greedy_until", request, answer)
535
+ res.append(answer)
536
+
537
+ return re_ords.get_original(res)
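For reference, the following standalone sketch (not part of the upstream file) illustrates the `ctxlen`/`contlen` slicing convention that `_loglikelihood_tokens` above relies on; all lengths and values are hypothetical, only the index arithmetic mirrors the code.

```python
# Toy illustration of the per-sample trimming in _loglikelihood_tokens above.
# All values are hypothetical; only the slicing logic mirrors the code.
import numpy as np

ctxlen, contlen, min_ctxlen = 3, 4, 2                  # hypothetical lengths for one sample
token_ids = np.array([11, 12, 13, 21, 22, 23, 24])     # 3 context + 4 continuation tokens
logprobs = -np.arange(1, 8, dtype=float)               # one logprob per position (toy values)
greedy_tokens = np.array([99, 21, 22, 23, 24, 99])     # argmax over full_logprob[min_ctxlen - 1 :]

# continuation logprob: positions are shifted by -1 because the first token has no logprob
cont_logprob = logprobs[ctxlen - 1 :][:contlen].sum()

# greedy check: re-align greedy_tokens (computed from min_ctxlen) to this sample's ctxlen
len_diff = ctxlen - min_ctxlen
continuation_tokens = token_ids[ctxlen:][:contlen]
is_greedy = bool((continuation_tokens == greedy_tokens[len_diff:][:contlen]).all())
print(cont_logprob, is_greedy)
```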
lm-evaluation/lm_eval/models/neuron_optimum.py ADDED
@@ -0,0 +1,736 @@
1
+ import copy
2
+ import json
3
+ import logging
4
+ import subprocess
5
+ from collections import defaultdict
6
+ from typing import List, Optional, Union
7
+
8
+ import torch
9
+ import torch.nn.functional as F
10
+ import transformers
11
+ from packaging import version
12
+ from tqdm import tqdm
13
+ from transformers import GenerationConfig
14
+ from transformers.generation import StoppingCriteriaList
15
+
16
+ import lm_eval.models.utils
17
+ from lm_eval import utils
18
+ from lm_eval.api.model import TemplateLM
19
+ from lm_eval.api.registry import register_model
20
+ from lm_eval.models.utils import stop_sequences_criteria
21
+
22
+
23
+ try:
24
+ NEURON_AVAILABLE = True
25
+ from optimum.neuron import NeuronModelForCausalLM
26
+ from optimum.neuron.generation import TokenSelector
27
+ from optimum.neuron.version import __version__ as optimum_neuron_version
28
+ except ImportError:
29
+ NeuronModelForCausalLM = object
30
+ NEURON_AVAILABLE = False
31
+
32
+
33
+ logger = logging.getLogger(__name__)
34
+
35
+
36
+ def get_nc_count() -> Union[int, None]:
37
+ """Returns the number of neuron cores on the current instance."""
38
+ try:
39
+ cmd = "neuron-ls --json-output"
40
+ result = subprocess.run(cmd, shell=True, capture_output=True)
41
+ print(f"inferring nc_count from `neuron-ls` {result.stdout}")
42
+ json_output = json.loads(result.stdout)
43
+ count = sum([x["nc_count"] for x in json_output])
44
+ print(f"nc_count={count}")
45
+ return count
46
+ except Exception:
47
+ return None
48
+
49
+
50
+ def wrap_constant_batch_size(func):
51
+ def _decorator(self, input_ids):
52
+ """input_ids a 2D array with batch_size on dim=0
53
+
54
+ makes sure the func runs with self.batch_size
55
+ """
56
+ # actual batch size of the incoming inputs
57
+ batch_size = input_ids.shape[0]
58
+
59
+ if batch_size < self.batch_size:
60
+ # handle the event of input_ids.shape[0] != batch_size
61
+ # Neuron cores expect constant batch_size
62
+ input_ids = torch.concat(
63
+ (
64
+ input_ids,
65
+ # add missing_batch_size dummy
66
+ torch.zeros(
67
+ [self.batch_size - batch_size, *input_ids.size()[1:]],
68
+ dtype=input_ids.dtype,
69
+ device=input_ids.device,
70
+ ),
71
+ ),
72
+ dim=0,
73
+ )
74
+ elif batch_size > self.batch_size:
75
+ raise ValueError(
76
+ f"The specified batch_size ({batch_size}) exceeds the model static batch size ({self.batch_size})"
77
+ )
78
+ # return the forward pass that requires constant batch size
79
+ return func(self, input_ids)[:batch_size]
80
+
81
+ return _decorator
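A rough, self-contained illustration (hypothetical shapes, stand-in forward pass) of the pad-to-static-batch-size-then-slice behaviour this decorator adds around the wrapped call:

```python
# Sketch of the constant-batch-size padding applied by wrap_constant_batch_size.
# Shapes and the fake "forward pass" are illustrative only.
import torch

static_batch_size = 4
inputs = torch.ones(2, 5, dtype=torch.long)               # only 2 real rows arrive
pad = torch.zeros(static_batch_size - inputs.shape[0], 5, dtype=torch.long)
padded = torch.cat((inputs, pad), dim=0)                   # [4, 5], what the Neuron core expects
outputs = padded * 10                                      # stand-in for the wrapped forward pass
real_outputs = outputs[: inputs.shape[0]]                  # dummy rows are sliced off again
print(real_outputs.shape)                                  # torch.Size([2, 5])
```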
82
+
83
+
84
+ class CustomNeuronModelForCausalLM(NeuronModelForCausalLM):
85
+ """NeuronModelForCausalLM with `stopping_criteria` in `generate`"""
86
+
87
+ def generate(
88
+ self,
89
+ input_ids: torch.Tensor,
90
+ attention_mask: Optional[torch.Tensor] = None,
91
+ stopping_criteria: Optional["StoppingCriteriaList"] = None,
92
+ generation_config: Optional["GenerationConfig"] = None,
93
+ **kwargs,
94
+ ) -> torch.LongTensor:
95
+ r"""
96
+ A streamlined generate() method overriding the transformers.GenerationMixin.generate() method.
97
+
98
+ This method uses the same logits processors/warpers and stopping criteria as the transformers library
99
+ `generate()` method but restricts the generation to greedy search and sampling.
100
+
101
+ It does not support transformers `generate()` advanced options.
102
+
103
+ Please refer to https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.GenerationMixin.generate
104
+ for details on generation configuration.
105
+
106
+ Parameters:
107
+ input_ids (`torch.Tensor` of shape `(batch_size, sequence_length)`):
108
+ The sequence used as a prompt for the generation.
109
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
110
+ Mask to avoid performing attention on padding token indices.
111
+ generation_config (`~transformers.generation.GenerationConfig`, *optional*):
112
+ The generation configuration to be used as base parametrization for the generation call. `**kwargs`
113
+ passed to generate matching the attributes of `generation_config` will override them. If
114
+ `generation_config` is not provided, default will be used, which had the following loading
115
+ priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
116
+ configuration. Please note that unspecified parameters will inherit [`~transformers.generation.GenerationConfig`]'s
117
+ default values, whose documentation should be checked to parameterize generation.
118
+
119
+ Returns:
120
+ `torch.Tensor`: A `torch.FloatTensor`.
121
+ """
122
+ # The actual generation configuration is a combination of config and parameters
123
+ generation_config = copy.deepcopy(
124
+ self.generation_config if generation_config is None else generation_config
125
+ )
126
+ model_kwargs = generation_config.update(
127
+ **kwargs
128
+ ) # All unused kwargs must be model kwargs
129
+ # Check model kwargs are actually used by either prepare_inputs_for_generation or forward
130
+ self._validate_model_kwargs(model_kwargs)
131
+
132
+ # Instantiate a TokenSelector for the specified configuration
133
+ selector = TokenSelector.create(
134
+ input_ids, generation_config, self, self.max_length
135
+ )
136
+ selector.stopping_criteria.append(stopping_criteria)
137
+ # Verify that the inputs are compatible with the model static input dimensions
138
+ batch_size, sequence_length = input_ids.shape
139
+ if sequence_length > self.max_length:
140
+ raise ValueError(
141
+ f"The input sequence length ({sequence_length}) exceeds the model static sequence length ({self.max_length})"
142
+ )
143
+ padded_input_ids = input_ids
144
+ padded_attention_mask = attention_mask
145
+ if batch_size > self.batch_size:
146
+ raise ValueError(
147
+ f"The specified batch_size ({batch_size}) exceeds the model static batch size ({self.batch_size})"
148
+ )
149
+ elif batch_size < self.batch_size:
150
+ logger.warning(
151
+ "Inputs will be padded to match the model static batch size. This will increase latency."
152
+ )
153
+ padding_shape = [self.batch_size - batch_size, sequence_length]
154
+ padding = torch.full(
155
+ padding_shape, fill_value=self.config.eos_token_id, dtype=torch.int64
156
+ )
157
+ padded_input_ids = torch.cat([input_ids, padding])
158
+ if attention_mask is not None:
159
+ padding = torch.zeros(padding_shape, dtype=torch.int64)
160
+ padded_attention_mask = torch.cat([attention_mask, padding])
161
+ # Drop the current generation context and clear the Key/Value cache
162
+ self.reset_generation()
163
+
164
+ output_ids = self.generate_tokens(
165
+ padded_input_ids,
166
+ selector,
167
+ batch_size,
168
+ attention_mask=padded_attention_mask,
169
+ **model_kwargs,
170
+ )
171
+ return output_ids[:batch_size, :]
172
+
173
+
174
+ @register_model("neuronx")
175
+ class NEURON_HF(TemplateLM):
176
+ """
177
+ Enables usage on AWS Neuron
178
+ using the HuggingFace Transformers + Transformers neuronx library.
179
+ Tested with neuron 2.17.0
180
+ """
181
+
182
+ _DEFAULT_MAX_LENGTH = 2048
183
+
184
+ def __init__(
185
+ self,
186
+ pretrained: Optional[str] = "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
187
+ revision: Optional[str] = "main",
188
+ tp_degree: Optional[int] = None,
189
+ subfolder: Optional[str] = None,
190
+ tokenizer: Optional[str] = None,
191
+ truncation: Optional[bool] = False,
192
+ max_length: Optional[int] = None,
193
+ dtype: Optional[Union[str, torch.dtype]] = "auto",
194
+ batch_size: Optional[int] = 1,
195
+ low_cpu_mem_usage: Optional[bool] = True,
196
+ trust_remote_code: Optional[bool] = False,
197
+ use_fast_tokenizer: Optional[bool] = True,
198
+ add_bos_token: Optional[bool] = False,
199
+ ) -> None:
200
+ if not NEURON_AVAILABLE:
201
+ raise Exception(
202
+ "Tried to load neuron model, but neuron is not installed ",
203
+ "please install neuron via pip install transformers-neuron ",
204
+ "also make sure you are running on an AWS inf2 instance",
205
+ )
206
+ if version.parse(optimum_neuron_version) != version.parse("0.0.17"):
207
+ logger.warning(
208
+ '`optimum-neuron` model requires `pip install "optimum[neuronx]>=0.0.17" '
209
+ "preferably using the Hugging Face Neuron Deep Learning AMI (Ubuntu 22.04) "
210
+ "https://aws.amazon.com/marketplace/pp/prodview-gr3e6yiscria2 "
211
+ f"You are using optimum-neuron={optimum_neuron_version}"
212
+ )
213
+ super().__init__()
214
+
215
+ assert isinstance(pretrained, str)
216
+ assert isinstance(batch_size, (int, str))
217
+
218
+ self.batch_size_per_gpu = int(batch_size)
219
+ batch_size = int(batch_size)
220
+ if tp_degree is None:
221
+ # execute `neuron-ls --json-output | jq '.[0].nc_count'``
222
+ # to get the number of neuron cores on your instance
223
+ tp_degree = get_nc_count()
224
+
225
+ assert isinstance(tp_degree, int), (
226
+ f"model_args must include tp_degree. tp_degree must be set to an integer,"
227
+ f" but is tp_degree=`{tp_degree}` with type=`{type(tp_degree)}`."
228
+ "Set it to number of neuron cores on your instance."
229
+ " For inf2.xlarge and inf2.8xlarge, set it to `2`."
230
+ " For inf2.24xlarge, set it to `12`."
231
+ " For inf2.48xlarge, set it to `24`."
232
+ )
233
+
234
+ # TODO: update this to be less of a hack once subfolder is fixed in HF
235
+ revision = revision + ("/" + subfolder if subfolder is not None else "")
236
+
237
+ self._config = transformers.AutoConfig.from_pretrained(
238
+ pretrained,
239
+ revision=revision,
240
+ trust_remote_code=trust_remote_code,
241
+ )
242
+ torch_dtype = lm_eval.models.utils.get_dtype(dtype)
243
+
244
+ assert torch_dtype in [
245
+ torch.float16,
246
+ torch.bfloat16,
247
+ ], "Only float16 and bfloat16 are supported"
248
+
249
+ self.tokenizer = transformers.AutoTokenizer.from_pretrained(
250
+ pretrained if tokenizer is None else tokenizer,
251
+ revision=revision,
252
+ trust_remote_code=trust_remote_code,
253
+ use_fast=use_fast_tokenizer,
254
+ )
255
+
256
+ # Neuron specific code
257
+ if torch_dtype == torch.float16:
258
+ self.amp_dtype = "f16"
259
+ elif torch_dtype == torch.bfloat16:
260
+ self.amp_dtype = "bf16"
261
+ elif torch_dtype == torch.float32:
262
+ self.amp_dtype = "f32"
263
+ else:
264
+ raise NotImplementedError("Only float16 and bfloat16 are implemented.")
265
+
266
+ compiler_args = {"num_cores": tp_degree, "auto_cast_type": self.amp_dtype}
267
+ input_shapes = {
268
+ "batch_size": batch_size,
269
+ "sequence_length": self._DEFAULT_MAX_LENGTH,
270
+ }
271
+
272
+ print(
273
+ f"{'='*20} \n loading model to neuron with"
274
+ f" {compiler_args}, {input_shapes}..."
275
+ )
276
+ self.model = CustomNeuronModelForCausalLM.from_pretrained(
277
+ pretrained,
278
+ revision=revision,
279
+ trust_remote_code=trust_remote_code,
280
+ low_cpu_mem_usage=low_cpu_mem_usage,
281
+ export=True,
282
+ **compiler_args,
283
+ **input_shapes,
284
+ )
285
+ print(f"SUCCESS: neuron model compiled. \n {'='*20}")
286
+
287
+ self.truncation = truncation
288
+
289
+ self.vocab_size = self.tokenizer.vocab_size
290
+ self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
291
+ self.add_bos_token = self.add_bos_token
292
+
293
+ self._max_length = max_length
294
+
295
+ self.batch_schedule = 1
296
+ self.batch_sizes = {}
297
+
298
+ @property
299
+ def config(self):
300
+ # return the associated transformers.AutoConfig for the given pretrained model.
301
+ return self._config
302
+
303
+ @property
304
+ def eot_token_id(self):
305
+ # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
306
+ return self.tokenizer.eos_token_id
307
+
308
+ @property
309
+ def prefix_token_id(self):
310
+ # it is used as prefix for loglikelihood
311
+ return self.tokenizer.bos_token_id or self.tokenizer.eos_token_id
312
+
313
+ @property
314
+ def max_length(self):
315
+ if self._max_length: # if max length manually set, return it
316
+ return self._max_length
317
+ seqlen_config_attrs = ("n_positions", "max_position_embeddings", "n_ctx")
318
+ for attr in seqlen_config_attrs:
319
+ if hasattr(self.model.config, attr):
320
+ return getattr(self.model.config, attr)
321
+ if hasattr(self.tokenizer, "model_max_length"):
322
+ if self.tokenizer.model_max_length == 1000000000000000019884624838656:
323
+ return self._DEFAULT_MAX_LENGTH
324
+ return self.tokenizer.model_max_length
325
+ return self._DEFAULT_MAX_LENGTH
326
+
327
+ @property
328
+ def max_gen_toks(self) -> int:
329
+ return 256
330
+
331
+ @property
332
+ def batch_size(self):
333
+ return self.batch_size_per_gpu
334
+
335
+ @property
336
+ def device(self):
337
+ """device are neuron cores, but the created tensors are on CPU."""
338
+ return "cpu"
339
+
340
+ @property
341
+ def rank(self):
342
+ return 0
343
+
344
+ @property
345
+ def world_size(self):
346
+ return 1
347
+
348
+ def tok_encode(self, string: str, left_truncate_len=None, add_special_tokens=None):
349
+ """ """
350
+ if add_special_tokens is None:
351
+ add_special_tokens = False or self.add_bos_token
352
+
353
+ encoding = self.tokenizer.encode(string, add_special_tokens=add_special_tokens)
354
+
355
+ # left-truncate the encoded context to be at most `left_truncate_len` tokens long
356
+ if left_truncate_len:
357
+ encoding = encoding[-left_truncate_len:]
358
+
359
+ return encoding
360
+
361
+ def tok_batch_encode(
362
+ self,
363
+ strings: List[str],
364
+ padding_side: str = "left",
365
+ left_truncate_len: int = None,
366
+ truncation: bool = False,
367
+ ):
368
+ # encode a batch of strings. converts to tensors and pads automatically, unlike tok_encode.
369
+ old_padding_side = self.tokenizer.padding_side
370
+ self.tokenizer.padding_side = padding_side
371
+
372
+ add_special_tokens = False or self.add_bos_token
373
+
374
+ encoding = self.tokenizer(
375
+ strings,
376
+ truncation=truncation,
377
+ padding="longest",
378
+ return_tensors="pt",
379
+ add_special_tokens=add_special_tokens,
380
+ )
381
+ if left_truncate_len:
382
+ encoding["input_ids"] = encoding["input_ids"][:, -left_truncate_len:]
383
+ encoding["attention_mask"] = encoding["attention_mask"][
384
+ :, -left_truncate_len:
385
+ ]
386
+ self.tokenizer.padding_side = old_padding_side
387
+
388
+ return encoding["input_ids"], encoding["attention_mask"]
389
+
390
+ def tok_decode(self, tokens):
391
+ return self.tokenizer.decode(tokens)
392
+
393
+ @wrap_constant_batch_size
394
+ def _model_call(self, input_ids: torch.Tensor):
395
+ """
396
+ get logits for the entire sequence
397
+
398
+ :param input_ids: torch.Tensor
399
+ A torch tensor of shape [batch, sequence_cont]
400
+ the size of sequence may vary from call to call
401
+ :return
402
+ A torch tensor of shape [batch, sequence, vocab] with the
403
+ logits returned from the model's decoder-lm head
404
+ """
405
+ _, sequence_length = input_ids.shape
406
+
407
+ with torch.inference_mode():
408
+ cache_ids = torch.arange(0, sequence_length, dtype=torch.int32).split(1)
409
+ input_ids_split = input_ids.split(1, dim=1)
410
+
411
+ return torch.concat(
412
+ [
413
+ self.model.forward(
414
+ input_ids=input_id, cache_ids=cache_id, return_dict=False
415
+ )[0]
416
+ for input_id, cache_id in zip(input_ids_split, cache_ids)
417
+ ],
418
+ dim=1,
419
+ )
420
+
421
+ def _model_generate(self, context, max_length, stop, **generation_kwargs):
422
+ # we require users to pass do_sample=True explicitly
423
+ # for non-greedy gen. This should be reevaluated when considering beam search.
424
+
425
+ with torch.inference_mode():
426
+ if "do_sample" not in generation_kwargs.keys():
427
+ generation_kwargs["do_sample"] = False
428
+
429
+ stopping_criteria = stop_sequences_criteria(
430
+ self.tokenizer,
431
+ stop + [self.tokenizer.decode([self.config.eos_token_id])],
432
+ 1,
433
+ context.shape[0],
434
+ )
435
+
436
+ return self.model.generate(
437
+ input_ids=context,
438
+ max_length=max_length,
439
+ stopping_criteria=stopping_criteria,
440
+ pad_token_id=self.eot_token_id,
441
+ use_cache=True,
442
+ **generation_kwargs,
443
+ )
444
+
445
+ def _select_cont_toks(self, logits, contlen=None, inplen=None):
446
+ assert (
447
+ contlen and inplen
448
+ ), "Must pass input len and cont. len to select scored logits for causal LM"
449
+ # discard right-padding.
450
+ # also discard the input/context tokens. we'll only score continuations.
451
+ logits = logits[inplen - contlen : inplen]
452
+
453
+ return logits
454
+
455
+ def loglikelihood_rolling(self, requests, disable_tqdm: bool = False):
456
+ loglikelihoods = []
457
+
458
+ adaptive_batch_size = None
459
+
460
+ for (string,) in tqdm(
461
+ [req.args for req in requests], disable=(disable_tqdm or (self.rank != 0))
462
+ ):
463
+ rolling_token_windows = list(
464
+ map(
465
+ utils.make_disjoint_window,
466
+ utils.get_rolling_token_windows(
467
+ token_list=self.tok_encode(string),
468
+ prefix_token=self.prefix_token_id,
469
+ max_seq_len=self.max_length,
470
+ context_len=1,
471
+ ),
472
+ )
473
+ )
474
+
475
+ # TODO: Right now, we pass single EOT token to the Encoder and the full context to the decoder, in seq2seq case
476
+ rolling_token_windows = [(None,) + x for x in rolling_token_windows]
477
+
478
+ pad_amnt = 0
479
+ if self.world_size > 1:
480
+ # We pad out the external document-level iterator so the inner iterator doesn't hang
481
+ mytensor = torch.tensor(len(rolling_token_windows), device=self.device)
482
+ gathered = (
483
+ self.accelerator.gather(mytensor).cpu().detach().numpy().tolist()
484
+ )
485
+
486
+ pad_amnt = max(gathered) - gathered[self.rank]
487
+ if pad_amnt > 0:
488
+ rolling_token_windows += pad_amnt * [rolling_token_windows[0]]
489
+
490
+ string_nll = self._loglikelihood_tokens(
491
+ rolling_token_windows,
492
+ disable_tqdm=True,
493
+ override_bs=adaptive_batch_size,
494
+ )
495
+
496
+ if (self.world_size > 1) and (pad_amnt > 0):
497
+ string_nll = [x[0] for x in string_nll[:-pad_amnt]]
498
+ else:
499
+ # discard is_greedy
500
+ string_nll = [x[0] for x in string_nll]
501
+
502
+ string_nll = sum(string_nll)
503
+ loglikelihoods.append(string_nll)
504
+
505
+ return loglikelihoods
506
+
507
+ def _loglikelihood_tokens(
508
+ self, requests, disable_tqdm: bool = False, override_bs=None
509
+ ):
510
+ # TODO: implement some kind of efficient-request-middleware that lumps together requests with the same context
511
+ res = []
512
+
513
+ def _collate(x):
514
+ # the negative sign on len(toks) sorts descending - this has a few advantages:
515
+ # - time estimates will always be over not underestimates, which is more useful for planning
516
+ # - to know the size of a batch when going through the list, you know the first one is always the batch
517
+ # padded context length. this is useful to simplify the batching logic and more importantly to make
518
+ # automatic adaptive batches much much easier to implement
519
+ # - any OOMs will happen right away rather than near the end
520
+
521
+ toks = x[1] + x[2]
522
+ return -len(toks), tuple(toks)
523
+
524
+ re_ord = utils.Reorderer(requests, _collate)
525
+
526
+ n_reordered_requests = len(re_ord.get_reordered()) # noqa
527
+ # automatic (variable) batch size detection for vectorization
528
+ # pull longest context sample from request
529
+
530
+ chunks = lm_eval.models.utils.chunks(
531
+ re_ord.get_reordered(),
532
+ n=self.batch_size,
533
+ fn=None,
534
+ )
535
+
536
+ for chunk in tqdm(chunks, disable=(disable_tqdm or (self.rank != 0))):
537
+ inps = []
538
+ cont_toks_list = []
539
+ inplens = []
540
+
541
+ conts = [] # noqa
542
+ encoder_attns = [] # noqa
543
+
544
+ padding_len_inp = None
545
+ padding_len_cont = None # noqa
546
+ # because vectorizing is annoying, we first convert each (context, continuation) pair to padded
547
+ # tensors, then we pack them together into a batch, call the model, and then pick it all apart
548
+ # again because vectorizing is annoying
549
+
550
+ for _, context_enc, continuation_enc in chunk:
551
+ # sanity check
552
+ assert len(context_enc) > 0
553
+ assert len(continuation_enc) > 0
554
+ assert len(continuation_enc) <= self.max_length
555
+
556
+ # how this all works (illustrated on a causal decoder-only setup):
557
+ # CTX CONT
558
+ # inp 0 1 2 3|4 5 6 7 8 9 <- last token is deleted by inp[:, :-1]
559
+ # model \ \
560
+ # logits 1 2 3|4 5 6 7 8 9 <- the ctx half gets tossed out by the
561
+ # cont_toks 4 5 6 7 8 9 [:, -len(continuation_enc):, :self.vocab_size] slice
562
+
563
+ # when too long to fit in context, truncate from the left
564
+ inp = torch.tensor(
565
+ (context_enc + continuation_enc)[-(self.max_length + 1) :][:-1],
566
+ dtype=torch.long,
567
+ device=self.device,
568
+ )
569
+ (inplen,) = inp.shape
570
+
571
+ padding_len_inp = (
572
+ max(padding_len_inp, inplen)
573
+ if padding_len_inp is not None
574
+ else inplen
575
+ )
576
+
577
+ inps.append(inp) # [1, inp_length]
578
+ cont_toks_list.append(continuation_enc)
579
+ inplens.append(inplen)
580
+
581
+ # create encoder attn mask and batched conts, if seq2seq
582
+ call_kwargs = {}
583
+ batched_inps = lm_eval.models.utils.pad_and_concat(
584
+ padding_len_inp, inps, padding_side="right"
585
+ ) # [batch, padding_len_inp]
586
+
587
+ multi_logits = F.log_softmax(
588
+ self._model_call(batched_inps, **call_kwargs), dim=-1
589
+ ) # [batch, padding_length (inp or cont), vocab]
590
+
591
+ for (cache_key, _, _), logits, inplen, cont_toks in zip(
592
+ chunk, multi_logits, inplens, cont_toks_list
593
+ ):
594
+ # Slice to original seq length
595
+ contlen = len(cont_toks)
596
+ # take only logits in the continuation
597
+ # (discard context toks if decoder-only ; discard right-padding)
598
+ # also discards + checks for "virtual tokens" in the causal LM's input window
599
+ # from prompt/prefix tuning tokens, if applicable
600
+ ctx_len = inplen + (logits.shape[0] - padding_len_inp)
601
+ logits = self._select_cont_toks(logits, contlen=contlen, inplen=ctx_len)
602
+ logits = logits.unsqueeze(0) # [1, seq, vocab]
603
+
604
+ # Check if per-token argmax is exactly equal to continuation
605
+ greedy_tokens = logits.argmax(dim=-1)
606
+ cont_toks = torch.tensor(
607
+ cont_toks, dtype=torch.long, device=self.device
608
+ ).unsqueeze(0) # [1, seq]
609
+ max_equal = (greedy_tokens == cont_toks).all()
610
+
611
+ # Obtain log-probs at the corresponding continuation token indices
612
+ # last_token_slice = logits[:, -1, :].squeeze(0).tolist()
613
+ logits = torch.gather(logits, 2, cont_toks.unsqueeze(-1)).squeeze(
614
+ -1
615
+ ) # [1, seq]
616
+
617
+ # Answer: (log prob, is-exact-match)
618
+ answer = (float(logits.sum()), bool(max_equal))
619
+
620
+ res.append(answer)
621
+
622
+ self.cache_hook.add_partial("loglikelihood", cache_key, answer)
623
+
624
+ return re_ord.get_original(res)
625
+
626
+ def generate_until(self, requests, disable_tqdm: bool = False):
627
+ res = defaultdict(list)
628
+ re_ords = {}
629
+
630
+ def _collate(x):
631
+ # the negative sign on len(toks) sorts descending - this has a few advantages:
632
+ # - time estimates will always be over not underestimates, which is more useful for planning
633
+ # - to know the size of a batch when going through the list, you know the first one is always the batch
634
+ # padded context length. this is useful to simplify the batching logic and more importantly to make
635
+ # automatic adaptive batches much much easier to implement
636
+ # - any OOMs will happen right away rather than near the end
637
+ toks = self.tok_encode(x[0])
638
+ return -len(toks), x[0]
639
+
640
+ # we group requests by their generation_kwargs,
641
+ # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
642
+ # in the same batch.
643
+ grouper = lm_eval.models.utils.Grouper(requests, lambda x: str(x.args[1]))
644
+ for key, reqs in grouper.get_grouped().items():
645
+ # within each set of reqs for given kwargs, we reorder by token length, descending.
646
+ re_ords[key] = utils.Reorderer([req.args for req in reqs], _collate)
647
+
648
+ pbar = tqdm(total=len(requests), disable=(disable_tqdm or (self.rank != 0)))
649
+
650
+ # for each different set of kwargs, we execute all requests, by batch.
651
+ for key, re_ord in re_ords.items():
652
+ chunks = lm_eval.models.utils.chunks(
653
+ re_ord.get_reordered(), n=self.batch_size
654
+ )
655
+ for chunk in tqdm(chunks, disable=self.rank != 0):
656
+ contexts, all_gen_kwargs = zip(*chunk)
657
+ # we assume all gen kwargs in the batch are the same
658
+ # this is safe to assume because the `grouper` object ensures it.
659
+ gen_kwargs = all_gen_kwargs[0]
660
+ # unpack our keyword arguments.
661
+ until = None
662
+ if isinstance(gen_kwargs, dict):
663
+ kwargs = copy.deepcopy(gen_kwargs) # edge case for repeats > 1
664
+ if "until" in kwargs.keys():
665
+ until = kwargs.pop("until")
666
+ if isinstance(until, str):
667
+ until = [until]
668
+ elif not isinstance(until, list):
669
+ raise ValueError(
670
+ f"Expected `kwargs['until']` to be of type Union[str,list] but got {until}"
671
+ )
672
+ else:
673
+ raise ValueError(
674
+ f"Expected `kwargs` to be of type `dict` but got {kwargs}"
675
+ )
676
+ # add EOS token to stop sequences
677
+ eos = self.tok_decode(self.eot_token_id)
678
+ if not until:
679
+ until = [eos]
680
+ else:
681
+ until.append(eos)
682
+ if "max_gen_toks" in kwargs.keys():
683
+ max_gen_toks = kwargs.pop("max_gen_toks")
684
+ else:
685
+ max_gen_toks = self.max_gen_toks
686
+ # first stop sequence is used to halt generation upon encountering
687
+ primary_until = [until[0]]
688
+
689
+ max_ctx_len = self.max_length - max_gen_toks
690
+
691
+ # encode, pad, and truncate contexts for this batch
692
+ context_enc, attn_masks = self.tok_batch_encode(
693
+ contexts,
694
+ left_truncate_len=max_ctx_len,
695
+ truncation=self.truncation,
696
+ )
697
+ context_enc = context_enc.to(self.device)
698
+ attn_masks = attn_masks.to(self.device)
699
+
700
+ if "max_length" not in kwargs:
701
+ kwargs["max_length"] = context_enc.shape[1] + max_gen_toks
702
+
703
+ # perform batched generation
704
+ cont = self._model_generate(
705
+ context=context_enc,
706
+ attention_mask=attn_masks,
707
+ stop=primary_until,
708
+ **kwargs,
709
+ )
710
+
711
+ cont_toks_list = cont.tolist()
712
+ for cont_toks, context in zip(cont_toks_list, contexts):
713
+ # discard context + left-padding toks if using causal decoder-only LM
714
+ cont_toks = cont_toks[context_enc.shape[1] :]
715
+
716
+ s = self.tok_decode(cont_toks)
717
+
718
+ # use secondary stop seqs to cut off should-have-been-stopped content post-hoc
719
+ for term in until:
720
+ if len(term) > 0:
721
+ # ignore '' separator,
722
+ # for seq2seq case where self.tok_decode(self.eot_token_id) = ''
723
+ s = s.split(term)[0]
724
+
725
+ res[key].append(s)
726
+
727
+ self.cache_hook.add_partial(
728
+ "generate_until", (context, gen_kwargs), s
729
+ )
730
+ pbar.update(1)
731
+ # reorder this group of results back to original unsorted form
732
+ res[key] = re_ord.get_original(res[key])
733
+
734
+ pbar.close()
735
+
736
+ return grouper.get_original(res)
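As a usage sketch, the `neuronx` model registered above could be driven from Python roughly as follows; `simple_evaluate` is assumed to be the harness entry point, and the checkpoint, `tp_degree`, and task below are placeholders rather than part of this file.

```python
# Hypothetical invocation of the "neuronx" model registered above.
# Argument names (pretrained, tp_degree, dtype) come from NEURON_HF.__init__;
# the entry point, checkpoint and task are assumptions for illustration only.
from lm_eval import simple_evaluate

results = simple_evaluate(
    model="neuronx",
    model_args="pretrained=TinyLlama/TinyLlama-1.1B-Chat-v1.0,tp_degree=2,dtype=bfloat16",
    tasks=["lambada_openai"],
)
print(results["results"])
```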
lm-evaluation/lm_eval/models/openai_completions.py ADDED
@@ -0,0 +1,481 @@
1
+ import copy
2
+ import os
3
+ from collections import defaultdict
4
+ from importlib.util import find_spec
5
+ from typing import List, Literal, Optional, Tuple
6
+
7
+ from tqdm import tqdm
8
+
9
+ import lm_eval.models.utils
10
+ from lm_eval import utils
11
+ from lm_eval.api.model import LM, TemplateLM
12
+ from lm_eval.api.registry import register_model
13
+ from lm_eval.models.utils import retry_on_specific_exceptions
14
+ from lm_eval.utils import eval_logger
15
+
16
+
17
+ def get_result(response, ctxlen: int) -> Tuple[float, bool]:
18
+ """Process results from OpenAI API response.
19
+
20
+ :param response: dict
21
+ OpenAI API Response
22
+ :param ctxlen: int
23
+ Length of context (so we can slice them away and only keep the predictions)
24
+ :return:
25
+ continuation_logprobs: np.array
26
+ Log probabilities of continuation tokens
27
+ is_greedy: bool
28
+ whether argmax matches given continuation exactly
29
+ """
30
+ is_greedy = True
31
+ logprobs = response.logprobs.token_logprobs
32
+ continuation_logprobs = sum(logprobs[ctxlen:])
33
+
34
+ for i in range(ctxlen, len(response.logprobs.token_logprobs)):
35
+ token = response.logprobs.token_logprobs[i]
36
+ top_tokens = response.logprobs.top_logprobs[i]
37
+ top_token = max(top_tokens.keys(), key=lambda x: top_tokens[x])
38
+ if top_token != token:
39
+ is_greedy = False
40
+ break
41
+
42
+ return continuation_logprobs, is_greedy
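A toy check of `get_result` with a fabricated response object; real OpenAI completion responses expose the same `tokens`, `token_logprobs`, and `top_logprobs` fields on each choice's `logprobs`, and the values below are made up.

```python
# Fabricated response shape for exercising get_result; values are illustrative only.
from types import SimpleNamespace

fake_logprobs = SimpleNamespace(
    tokens=["The", " capital", " is", " Paris"],
    token_logprobs=[None, -0.2, -0.1, -0.3],
    top_logprobs=[None, {}, {" is": -0.1}, {" Paris": -0.3, " Lyon": -1.2}],
)
fake_choice = SimpleNamespace(logprobs=fake_logprobs)

# with ctxlen=2, only the last two positions count as the continuation
print(get_result(fake_choice, ctxlen=2))  # -> roughly (-0.4, True)
```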
43
+
44
+
45
+ def oa_completion(client, chat: bool = False, **kwargs):
46
+ """Query OpenAI API for completion.
47
+
48
+ Retry with back-off until they respond
49
+ """
50
+ if not find_spec("openai") or not find_spec("tiktoken"):
51
+ raise Exception(
52
+ "attempted to use 'openai' LM type, but package `openai` or `tiktoken` are not installed. "
53
+ "Please install these via `pip install lm-eval[openai]` or `pip install -e .[openai]`"
54
+ )
55
+ else:
56
+ import openai
57
+
58
+ def _exception_callback(e: Exception, sleep_time: float) -> None:
59
+ import traceback
60
+
61
+ traceback.print_exc()
62
+
63
+ @retry_on_specific_exceptions(
64
+ on_exceptions=[openai.OpenAIError],
65
+ max_retries=None, # retry forever, consider changing
66
+ on_exception_callback=_exception_callback,
67
+ )
68
+ def completion():
69
+ if chat:
70
+ return client.chat.completions.create(**kwargs)
71
+ else:
72
+ return client.completions.create(**kwargs)
73
+
74
+ return completion()
75
+
76
+
77
+ @register_model("openai-completions", "local-completions")
78
+ class OpenaiCompletionsLM(TemplateLM):
79
+ _DEFAULT_MAX_LENGTH = 2048
80
+
81
+ def __init__(
82
+ self,
83
+ model: str,
84
+ base_url: str = None,
85
+ tokenizer: Optional[str] = None,
86
+ tokenizer_backend: Literal["tiktoken", "huggingface"] = "tiktoken",
87
+ truncate: bool = False,
88
+ max_gen_toks: int = 256,
89
+ batch_size: int = 1,
90
+ seed: int = 1234,
91
+ max_length: Optional[int] = None,
92
+ ) -> None:
93
+ """
94
+
95
+ :param model: str
96
+ OpenAI API model (e.g. gpt-3.5-turbo-instruct)
97
+ :param truncate: bool
98
+ Truncate input if too long (if False and input is too long, throw error)
99
+ """
100
+ super().__init__()
101
+ self.seed = seed
102
+ try:
103
+ import openai # noqa: E401
104
+ import tiktoken
105
+ except ModuleNotFoundError:
106
+ raise Exception(
107
+ "attempted to use 'openai' LM type, but package `openai` or `tiktoken` are not installed. \
108
+ please install these via `pip install lm-eval[openai]` or `pip install -e .\"[openai]\"`",
109
+ )
110
+ self.model = model
111
+ self.base_url = base_url
112
+ self.tokenizer_backend = tokenizer_backend
113
+ self.truncate = truncate
114
+ self._batch_size = int(batch_size)
115
+ self._max_gen_toks = max_gen_toks
116
+ self._max_length = max_length
117
+
118
+ # if we have a local model, use HF tokenizer over tiktoken
119
+ if self.tokenizer_backend == "huggingface":
120
+ import transformers # noqa: E401
121
+
122
+ self.tokenizer = transformers.AutoTokenizer.from_pretrained(
123
+ tokenizer if tokenizer else self.model
124
+ )
125
+ self.vocab_size = self.tokenizer.vocab_size
126
+ self.end_of_text_token_id = self.tokenizer.eos_token_id
127
+ elif self.tokenizer_backend == "tiktoken":
128
+ if self.base_url:
129
+ eval_logger.warning(
130
+ f"Passed `base_url={self.base_url}` but using Tiktoken tokenizer backend. "
131
+ "Pass `tokenizer_backend=huggingface` and provide the HF tokenizer name if your model does not use Tiktoken."
132
+ )
133
+
134
+ self.tokenizer = tiktoken.encoding_for_model(self.model)
135
+ self.vocab_size = self.tokenizer.n_vocab
136
+ self.end_of_text_token_id = self.tokenizer.eot_token
137
+ else:
138
+ raise ValueError(
139
+ f"Expected tokenizer_backend to be one of ['tiktoken', 'huggingface'] but got {self.tokenizer_backend}"
140
+ )
141
+
142
+ # Read from environment variable OPENAI_API_KEY
143
+ # Set to EMPTY for local
144
+ openai.api_key = os.environ["OPENAI_API_KEY"]
145
+ if self.base_url:
146
+ self.client = openai.OpenAI(base_url=self.base_url)
147
+ else:
148
+ self.client = openai.OpenAI()
149
+
150
+ @property
151
+ def eot_token_id(self):
152
+ return self.end_of_text_token_id
153
+
154
+ @property
155
+ def max_length(self) -> int:
156
+ if self._max_length:
157
+ return self._max_length
158
+ else:
159
+ return self._DEFAULT_MAX_LENGTH
160
+
161
+ @property
162
+ def max_gen_toks(self) -> int:
163
+ return self._max_gen_toks
164
+
165
+ @property
166
+ def batch_size(self) -> int:
167
+ return self._batch_size
168
+
169
+ @property
170
+ def device(self):
171
+ # Isn't used because we override _loglikelihood_tokens
172
+ raise NotImplementedError()
173
+
174
+ def tok_encode(self, string: str, **kwargs) -> List[int]:
175
+ return self.tokenizer.encode(string)
176
+
177
+ def tok_decode(self, tokens: List[int]) -> str:
178
+ return self.tokenizer.decode(tokens)
179
+
180
+ def _loglikelihood_tokens(
181
+ self, requests, disable_tqdm: bool = False
182
+ ) -> List[Tuple[float, bool]]:
183
+ res = []
184
+
185
+ def _collate(x):
186
+ # this doesn't efficiently handle last-token differences yet, but those are kinda annoying because
187
+ # it's not guaranteed that the 100 or so logprobs we get to see actually contain all the continuations
188
+ # we care about, and so we need some kind of backup for when it isn't
189
+ toks = x[1] + x[2]
190
+ return -len(toks), tuple(toks)
191
+
192
+ re_ord = utils.Reorderer(requests, _collate)
193
+
194
+ for chunk in tqdm(
195
+ list(lm_eval.models.utils.chunks(re_ord.get_reordered(), self.batch_size)),
196
+ disable=disable_tqdm,
197
+ ):
198
+ inps = []
199
+ ctxlens = []
200
+ for cache_key, context_enc, continuation_enc in chunk:
201
+ # max_length+1 because the API takes up to 2049 tokens, including the first context token
202
+ inp = (context_enc + continuation_enc)[-(self.max_length + 1) :]
203
+ # TODO: the logic is much simpler if we just look at the length of continuation tokens
204
+ ctxlen = len(context_enc) - max(
205
+ 0, len(context_enc) + len(continuation_enc) - (self.max_length + 1)
206
+ )
207
+
208
+ inps.append(inp)
209
+ ctxlens.append(ctxlen)
210
+
211
+ response = oa_completion(
212
+ client=self.client,
213
+ model=self.model,
214
+ prompt=inps,
215
+ echo=True,
216
+ max_tokens=0,
217
+ temperature=0.0,
218
+ logprobs=10,
219
+ seed=self.seed,
220
+ )
221
+
222
+ for resp, ctxlen, (cache_key, context_enc, continuation_enc) in zip(
223
+ response.choices, ctxlens, chunk
224
+ ):
225
+ answer = get_result(resp, ctxlen)
226
+
227
+ res.append(answer)
228
+
229
+ # partial caching
230
+ if cache_key is not None:
231
+ self.cache_hook.add_partial("loglikelihood", cache_key, answer)
232
+ return re_ord.get_original(res)
233
+
234
+ def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]:
235
+ if not requests:
236
+ return []
237
+ res = []
238
+ requests = [req.args for req in requests]
239
+
240
+ def _collate(x):
241
+ toks = self.tok_encode(x[0])
242
+ return len(toks), x[0]
243
+
244
+ re_ord = utils.Reorderer(requests, _collate)
245
+
246
+ def sameuntil_chunks(xs, size):
247
+ ret = []
248
+ lastuntil = xs[0][1]
249
+ for x in xs:
250
+ if len(ret) >= size or x[1] != lastuntil:
251
+ yield ret, lastuntil
252
+ ret = []
253
+ lastuntil = x[1]
254
+ ret.append(x)
255
+
256
+ if ret:
257
+ yield ret, lastuntil
258
+
259
+ # todo: more intelligent batching for heterogeneous `until`
260
+ for chunk, request_args in tqdm(
261
+ list(sameuntil_chunks(re_ord.get_reordered(), self.batch_size)),
262
+ disable=disable_tqdm,
263
+ ):
264
+ inps = []
265
+ self._max_gen_toks = request_args.get("max_gen_toks", self.max_gen_toks)
266
+ for context, _ in chunk:
267
+ context_enc = self.tok_encode(context)
268
+ inp = context_enc[-(self.max_length - self.max_gen_toks) :]
269
+ inps.append(inp)
270
+
271
+ until = request_args.get("until", ["<|endoftext|>"])
272
+ request_args["temperature"] = request_args.get("temperature", 0)
273
+
274
+ response = oa_completion(
275
+ client=self.client,
276
+ model=self.model,
277
+ prompt=inps,
278
+ max_tokens=self.max_gen_toks,
279
+ stop=until,
280
+ seed=self.seed,
281
+ **{
282
+ k: v
283
+ for k, v in request_args.items()
284
+ if k not in {"do_sample", "max_gen_toks", "until"}
285
+ },
286
+ )
287
+ for resp, (context, args_) in zip(response.choices, chunk):
288
+ s = getattr(resp, "text")
289
+
290
+ until_ = until
291
+
292
+ for term in until_:
293
+ if len(term) > 0:
294
+ s = s.split(term)[0]
295
+
296
+ # partial caching
297
+ self.cache_hook.add_partial(
298
+ "generate_until", (context, {"until": until_}), s
299
+ )
300
+
301
+ res.append(s)
302
+ return re_ord.get_original(res)
303
+
304
+ def _model_call(self, inps):
305
+ # Isn't used because we override _loglikelihood_tokens
306
+ raise NotImplementedError()
307
+
308
+ def _model_generate(self, context, max_length, eos_token_id):
309
+ # Isn't used because we override generate_until
310
+ raise NotImplementedError()
311
+
312
+ def loglikelihood_rolling(
313
+ self, requests, disable_tqdm: bool = False
314
+ ) -> List[float]:
315
+ loglikelihoods = []
316
+
317
+ for (string,) in tqdm([req.args for req in requests], disable=disable_tqdm):
318
+ rolling_token_windows = list(
319
+ map(
320
+ utils.make_disjoint_window,
321
+ utils.get_rolling_token_windows(
322
+ token_list=self.tok_encode(string),
323
+ prefix_token=self.eot_token_id,
324
+ max_seq_len=self.max_length,
325
+ context_len=1,
326
+ ),
327
+ )
328
+ )
329
+
330
+ # TODO: Right now, we pass single EOT token to the Encoder and the full context to the decoder, in seq2seq case
331
+ rolling_token_windows = [(None,) + x for x in rolling_token_windows]
332
+
333
+ string_nll = self._loglikelihood_tokens(
334
+ rolling_token_windows,
335
+ disable_tqdm=True,
336
+ )
337
+
338
+ # discard is_greedy
339
+ string_nll = [x[0] for x in string_nll]
340
+
341
+ string_nll = sum(string_nll)
342
+ loglikelihoods.append(string_nll)
343
+ return loglikelihoods
344
+
345
+
346
+ @register_model("openai-chat-completions", "local-chat-completions")
347
+ class OpenaiChatCompletionsLM(LM):
348
+ def __init__(
349
+ self,
350
+ model: str = "gpt-3.5-turbo", # GPT model or Local model using HuggingFace model paths
351
+ base_url: str = None,
352
+ truncate: bool = False,
353
+ **kwargs,
354
+ ) -> None:
355
+ """
356
+
357
+ :param model: str
359
+ OpenAI API model name (e.g. gpt-3.5-turbo) or the identifier of a
360
+ locally-hosted model. Implements an OpenAI-style chat completion
361
+ API for accessing both OpenAI and locally-hosted models,
362
+ using a HuggingFace tokenizer where needed and
363
+ the **gen_kwargs passed on init
363
+ :param truncate: bool
364
+ Truncate input if too long (if False and input is too long, throw error)
365
+ """
366
+ super().__init__()
367
+ try:
368
+ import openai # noqa: E401
369
+ except ModuleNotFoundError:
370
+ raise Exception(
371
+ "attempted to use 'openai' LM type, but package `openai` or `tiktoken` are not installed. \
372
+ please install these via `pip install lm-eval[openai]` or `pip install -e .[openai]`",
373
+ )
374
+ self.model = model
375
+ self.base_url = base_url
376
+ self.truncate = truncate
377
+
378
+ # Read from environment variable OPENAI_API_KEY
379
+ # Set to EMPTY for local
380
+ if self.base_url:
381
+ self.client = openai.OpenAI(base_url=self.base_url)
382
+ else:
383
+ self.client = openai.OpenAI() # openai.AsyncOpenAI()
384
+
385
+ @property
386
+ def max_length(self) -> int:
387
+ # Note: the OpenAI API supports up to 2049 tokens, with the first token being the first input token
388
+ return 2048
389
+
390
+ @property
391
+ def max_gen_toks(self) -> int:
392
+ return 256
393
+
394
+ @property
395
+ def batch_size(self):
396
+ # Isn't used because we override _loglikelihood_tokens
397
+ raise NotImplementedError()
398
+
399
+ @property
400
+ def device(self):
401
+ # Isn't used because we override _loglikelihood_tokens
402
+ raise NotImplementedError()
403
+
404
+ def generate_until(self, requests, disable_tqdm: bool = False) -> List[str]:
405
+ res = defaultdict(list)
406
+ re_ords = {}
407
+
408
+ # we group requests by their generation_kwargs,
409
+ # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
410
+ # in the same batch.
411
+ grouper = lm_eval.models.utils.Grouper(requests, lambda x: str(x.args[1]))
412
+ for key, reqs in grouper.get_grouped().items():
413
+ # within each set of reqs for given kwargs, we reorder by token length, descending.
414
+ re_ords[key] = utils.Reorderer(
415
+ [req.args for req in reqs], lambda x: (-len(x[0]), x[0])
416
+ )
417
+
418
+ pbar = tqdm(total=len(requests), disable=(disable_tqdm or (self.rank != 0)))
419
+ for key, re_ord in re_ords.items():
420
+ # n needs to be 1 because messages in
421
+ # chat completion are not batch but
422
+ # is regarded as a single conversation.
423
+ chunks = lm_eval.models.utils.chunks(re_ord.get_reordered(), n=1)
424
+ for chunk in chunks:
425
+ contexts, all_gen_kwargs = zip(*chunk)
426
+ inps = [{"role": "user", "content": context} for context in contexts]
427
+
428
+ gen_kwargs = all_gen_kwargs[0]
429
+ until = None
430
+ if isinstance(kwargs := copy.deepcopy(gen_kwargs), dict):
431
+ if "do_sample" in kwargs.keys():
432
+ kwargs.pop("do_sample")
433
+ if "until" in kwargs.keys():
434
+ until = kwargs.pop("until")
435
+ if isinstance(until, str):
436
+ until = [kwargs]
437
+ elif not isinstance(until, list):
438
+ raise ValueError(
439
+ f"Expected repr(kwargs['until']) to be of type Union[str, list] but got {until}"
440
+ )
441
+ kwargs["stop"] = until
442
+ kwargs["max_tokens"] = kwargs.pop("max_gen_toks", self.max_gen_toks)
443
+ else:
444
+ raise ValueError(
445
+ f"Expected repr(kwargs) to be of type repr(dict) but got {kwargs}"
446
+ )
447
+
448
+ response = oa_completion(
449
+ client=self.client,
450
+ chat=True,
451
+ messages=inps,
452
+ model=self.model,
453
+ **kwargs,
454
+ )
455
+
456
+ for resp, (context, args_) in zip(response.choices, chunk):
457
+ s = resp.message.content
458
+
459
+ if until is not None:
460
+ for term in until:
461
+ if len(term) > 0:
462
+ s = s.split(term)[0]
463
+
464
+ res[key].append(s)
465
+
466
+ self.cache_hook.add_partial(
467
+ "generate_until", (context, {"until": until}), s
468
+ )
469
+ pbar.update(1)
470
+ # reorder this group of results back to original unsorted form
471
+ res[key] = re_ord.get_original(res[key])
472
+
473
+ pbar.close()
474
+
475
+ return grouper.get_original(res)
476
+
477
+ def loglikelihood(self, requests, disable_tqdm: bool = False):
478
+ raise NotImplementedError("No support for logits.")
479
+
480
+ def loglikelihood_rolling(self, requests, disable_tqdm: bool = False):
481
+ raise NotImplementedError("No support for logits.")
lm-evaluation/lm_eval/models/optimum_lm.py ADDED
@@ -0,0 +1,69 @@
1
+ from importlib.util import find_spec
2
+ from pathlib import Path
3
+
4
+ from lm_eval.api.registry import register_model
5
+ from lm_eval.models.huggingface import HFLM
6
+
7
+
8
+ @register_model("openvino")
9
+ class OptimumLM(HFLM):
10
+ """
11
+ Optimum Intel provides a simple interface to optimize Transformer models and convert them to \
12
+ OpenVINO™ Intermediate Representation (IR) format to accelerate end-to-end pipelines on \
13
+ Intel® architectures using OpenVINO™ runtime.
14
+ """
15
+
16
+ def __init__(
17
+ self,
18
+ device="cpu",
19
+ **kwargs,
20
+ ) -> None:
21
+ if "backend" in kwargs:
22
+ # optimum currently only supports causal models
23
+ assert (
24
+ kwargs["backend"] == "causal"
25
+ ), "Currently, only OVModelForCausalLM is supported."
26
+
27
+ self.openvino_device = device
28
+
29
+ super().__init__(
30
+ device=self.openvino_device,
31
+ backend=kwargs.pop("backend", "causal"),
32
+ **kwargs,
33
+ )
34
+
35
+ def _create_model(
36
+ self,
37
+ pretrained: str,
38
+ revision="main",
39
+ dtype="auto",
40
+ trust_remote_code=False,
41
+ **kwargs,
42
+ ) -> None:
43
+ if not find_spec("optimum"):
44
+ raise Exception(
45
+ "package `optimum` is not installed. Please install it via `pip install optimum[openvino]`"
46
+ )
47
+ else:
48
+ from optimum.intel.openvino import OVModelForCausalLM
49
+
50
+ model_kwargs = kwargs if kwargs else {}
51
+ model_file = Path(pretrained) / "openvino_model.xml"
52
+ if model_file.exists():
53
+ export = False
54
+ else:
55
+ export = True
56
+ kwargs["ov_config"] = {
57
+ "PERFORMANCE_HINT": "LATENCY",
58
+ "NUM_STREAMS": "1",
59
+ "CACHE_DIR": "",
60
+ }
61
+
62
+ self._model = OVModelForCausalLM.from_pretrained(
63
+ pretrained,
64
+ revision=revision,
65
+ trust_remote_code=trust_remote_code,
66
+ export=export,
67
+ device=self.openvino_device.upper(),
68
+ **model_kwargs,
69
+ )
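A minimal usage sketch for the `openvino` model registered above; the model directory and task are placeholders, and `simple_evaluate` is assumed to be the harness entry point. If `openvino_model.xml` is absent from the directory, `_create_model` exports the model on the fly.

```python
# Hypothetical run of the "openvino" model registered above; path and task are placeholders.
from lm_eval import simple_evaluate  # assumed harness entry point

results = simple_evaluate(
    model="openvino",
    model_args="pretrained=./my-openvino-model,trust_remote_code=False",
    tasks=["lambada_openai"],
)
print(results["results"])
```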
lm-evaluation/lm_eval/models/textsynth.py ADDED
@@ -0,0 +1,171 @@
1
+ """ TextSynth API
2
+ Implementation provided by Fabrice Bellard:
3
+ https://github.com/EleutherAI/lm-evaluation-harness/issues/295
4
+
5
+ In order to use the API, you must have a valid TextSynth account and
6
+ enough credits.
7
+
8
+ Example usage:
9
+
10
+ python main.py --model textsynth --model_args engine=gptj_6B --no_cache --tasks piqa
11
+
12
+ Homepage: https://textsynth.com/index.html
13
+ """
14
+ import logging
15
+ import os
16
+
17
+ import requests as _requests
18
+ from tqdm import tqdm
19
+
20
+ from lm_eval.api.model import LM
21
+ from lm_eval.api.registry import register_model
22
+ from lm_eval.models.utils import retry_on_specific_exceptions
23
+
24
+
25
+ logger = logging.getLogger(__name__)
26
+
27
+
28
+ def textsynth_completion(**kwargs):
29
+ """Query TextSynth API for completion.
30
+ Retry with back-off until they respond.
31
+ """
32
+
33
+ def _exception_callback(e: Exception, sleep_time: float) -> None:
34
+ import traceback
35
+
36
+ traceback.print_exc()
37
+
38
+ @retry_on_specific_exceptions(
39
+ on_exceptions=[_requests.exceptions.RequestException],
40
+ max_retries=None, # retry forever, consider changing
41
+ on_exception_callback=_exception_callback,
42
+ )
43
+ def completion():
44
+ return _requests.post(**kwargs)
45
+
46
+ return completion()
47
+
48
+
49
+ @register_model("textsynth")
50
+ class TextSynthLM(LM):
51
+ def __init__(self, engine, truncate: bool = False, **kwargs) -> None:
52
+ """
53
+ :param engine: str
54
+ TextSynth API engine (e.g. `gptj_6B`)
55
+ :param truncate: bool
56
+ Truncate input if too long (if False and input is too long, throw error)
57
+ """
58
+ super().__init__()
59
+
60
+ self.engine = engine
61
+ self.truncate = truncate
62
+ self.api_url = "https://api.textsynth.com"
63
+ # Read from environment variable TEXTSYNTH_API_SECRET_KEY
64
+ self.api_key = os.environ["TEXTSYNTH_API_SECRET_KEY"]
65
+
66
+ @property
67
+ def eot_token_id(self):
68
+ # Isn't used because we override loglikelihood, loglikelihood_rolling and generate_until
69
+ raise NotImplementedError()
70
+
71
+ @property
72
+ def max_length(self) -> int:
73
+ # NOTE: Turn on truncation to avoid errors on long inputs.
74
+ return 2048
75
+
76
+ @property
77
+ def max_gen_toks(self) -> int:
78
+ return 256
79
+
80
+ @property
81
+ def batch_size(self):
82
+ # Isn't used because we override loglikelihood, loglikelihood_rolling and generate_until
83
+ raise NotImplementedError()
84
+
85
+ @property
86
+ def device(self):
87
+ # Isn't used because we override loglikelihood, loglikelihood_rolling and generate_until
88
+ raise NotImplementedError()
89
+
90
+ def tok_encode(self, string: str):
91
+ # Isn't used because we override loglikelihood, loglikelihood_rolling and generate_until
92
+ raise NotImplementedError()
93
+
94
+ def tok_decode(self, tokens):
95
+ # Isn't used because we override loglikelihood, loglikelihood_rolling and generate_until
96
+ raise NotImplementedError()
97
+
98
+ def loglikelihood(self, requests, disable_tqdm: bool = False):
99
+ res = []
100
+ for context, continuation in tqdm(requests, disable=disable_tqdm):
101
+ response = textsynth_completion(
102
+ url=self.api_url + "/v1/engines/" + self.engine + "/logprob",
103
+ headers={"Authorization": "Bearer " + self.api_key},
104
+ json={"context": context, "continuation": continuation},
105
+ )
106
+ resp = response.json()
107
+ if "logprob" in resp:
108
+ logprob = resp["logprob"]
109
+ is_greedy = resp["is_greedy"]
110
+ res.append((logprob, is_greedy))
111
+
112
+ self.cache_hook.add_partial(
113
+ "loglikelihood", (context, continuation), (logprob, is_greedy)
114
+ )
115
+ else:
116
+ logger.error(
117
+ f"The following response does not contain `logprobs`. Got:\n{resp}"
118
+ )
119
+ assert False
120
+ return res
121
+
122
+ def loglikelihood_rolling(self, requests, disable_tqdm: bool = False):
123
+ # TODO: The TextSynth API does not support tokenized inputs so we cannot
124
+ # manually partition long contexts into smaller rolling windows as
125
+ # done for other models derived from `BaseLM`. Override this method
126
+ # with a windowing scheme that works for direct string inputs.
127
+ raise NotImplementedError(
128
+ "`loglikelihood_rolling` is currently not supported due to lack of "
129
+ "input tokenization support from TextSynth."
130
+ )
131
+
132
+ def generate_until(self, requests, disable_tqdm: bool = False):
133
+ if not requests:
134
+ return []
135
+
136
+ res = []
137
+ for request in tqdm(requests, disable=disable_tqdm):
138
+ inp = request[0]
139
+ request_args = request[1]
140
+ until = request_args["until"]
141
+ response = textsynth_completion(
142
+ url=self.api_url + "/v1/engines/" + self.engine + "/completions",
143
+ headers={"Authorization": "Bearer " + self.api_key},
144
+ json={
145
+ "prompt": inp,
146
+ "max_tokens": self.max_gen_toks,
147
+ "top_k": 1,
148
+ "stop": until,
149
+ },
150
+ )
151
+ resp = response.json()
152
+ if "text" in resp:
153
+ s = resp["text"]
154
+ res.append(s)
155
+
156
+ self.cache_hook.add_partial("generate_until", (inp, request_args), s)
157
+ else:
158
+ logger.error(
159
+ "The following response does not contain generated `text`. "
160
+ "Got:\n{resp}"
161
+ )
162
+ assert False
163
+ return res
164
+
165
+ def _model_call(self, inps):
166
+ # Isn't used because we override _loglikelihood_tokens
167
+ raise NotImplementedError()
168
+
169
+ def _model_generate(self, context, max_length, eos_token_id):
170
+ # Isn't used because we override generate_until
171
+ raise NotImplementedError()
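For orientation, the raw TextSynth request that `loglikelihood` above wraps looks roughly like this; the engine name and strings are examples, and a valid `TEXTSYNTH_API_SECRET_KEY` is assumed.

```python
# Minimal sketch of the TextSynth logprob call wrapped by TextSynthLM.loglikelihood.
# Engine and strings are examples; requires a valid TEXTSYNTH_API_SECRET_KEY.
import os

import requests

resp = requests.post(
    "https://api.textsynth.com/v1/engines/gptj_6B/logprob",
    headers={"Authorization": "Bearer " + os.environ["TEXTSYNTH_API_SECRET_KEY"]},
    json={"context": "The capital of France is", "continuation": " Paris"},
)
data = resp.json()
print(data.get("logprob"), data.get("is_greedy"))
```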
lm-evaluation/lm_eval/models/utils.py ADDED
@@ -0,0 +1,615 @@
1
+ import collections
2
+ import fnmatch
3
+ import gc
4
+ import itertools
5
+ import time
6
+ from functools import wraps
7
+ from typing import (
8
+ Any,
9
+ Callable,
10
+ Dict,
11
+ Iterable,
12
+ Iterator,
13
+ List,
14
+ Literal,
15
+ Optional,
16
+ Tuple,
17
+ Type,
18
+ Union,
19
+ )
20
+
21
+ import torch
22
+ import transformers
23
+
24
+ from lm_eval.utils import eval_logger
25
+
26
+
27
+ def chunks(iter, n: int = 0, fn=None):
28
+ """
29
+ Divides an iterable into chunks of specified size or based on a given function.
30
+ Useful for batching
31
+
32
+ Parameters:
33
+ - iter: The input iterable to be divided into chunks.
34
+ - n: An integer representing the size of each chunk. Default is 0.
35
+ - fn: A function that takes the current index and the iterable as arguments and returns the size of the chunk. Default is None.
36
+
37
+ Returns:
38
+ An iterator that yields chunks of the input iterable.
39
+
40
+ Example usage:
41
+ ```
42
+ data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
43
+ for chunk in chunks(data, 3):
44
+ print(chunk)
45
+ ```
46
+ Output:
47
+ ```
48
+ [1, 2, 3]
49
+ [4, 5, 6]
50
+ [7, 8, 9]
51
+ [10]
52
+ ```
53
+ """
54
+ arr = []
55
+ for i, x in enumerate(iter):
56
+ arr.append(x)
57
+ if len(arr) == (fn(i, iter) if fn else n):
58
+ yield arr
59
+ arr = []
60
+
61
+ if arr:
62
+ yield arr
63
+
64
+
65
+ class MultiChoice:
66
+ def __init__(self, choices) -> None:
67
+ self.choices = choices
68
+
69
+ # Simple wildcard support (linux filename patterns)
70
+ def __contains__(self, values) -> bool:
71
+ for value in values.split(","):
72
+ if len(fnmatch.filter(self.choices, value)) == 0:
73
+ eval_logger.info("Available tasks to choose:")
74
+ for choice in self.choices:
75
+ eval_logger.info(f" - {choice}")
76
+ raise ValueError("'{}' is not in task list".format(value))
77
+ return True
78
+
79
+ def __iter__(self) -> Iterator:
80
+ for choice in self.choices:
81
+ yield choice
82
+
83
+
84
+ class Grouper:
85
+ """
86
+ takes an array `arr` and function `fn` and returns a dictionary
87
+ with keys fn(ob) for each ob in `arr` and with values `self.arr[key]` a list of all
88
+ objects in `arr` satisfying `key == fn(ob)`.
89
+ """
90
+
91
+ def __init__(self, arr, fn) -> None:
92
+ # self.orig_arr = arr
93
+ self.size = len(arr)
94
+ arr = list(enumerate(arr))
95
+
96
+ def group_return_dict(arr, fn):
97
+ res = collections.defaultdict(list)
98
+
99
+ for ob in arr:
100
+ res[fn(ob)].append(ob)
101
+ return res
102
+
103
+ arr = group_return_dict(arr, lambda x: fn(x[1]))
104
+
105
+ # self.arr has format Dict[Tuple[int, <entry from orig. arr>]]
106
+ self.arr = arr
107
+ self._grouped = None
108
+
109
+ def get_grouped(self):
110
+ # return the contents but not indices for our grouped dict.
111
+ if self._grouped:
112
+ return self._grouped
113
+ grouped = {}
114
+ for key in self.arr.keys():
115
+ # drop the index from each element of self.arr
116
+ grouped[key] = [y[1] for y in self.arr[key]]
117
+ self._grouped = grouped
118
+ return grouped
119
+
120
+ def get_original(self, grouped_dict):
121
+ # take in a grouped dictionary with e.g. results for each key listed
122
+ # in the same order as the instances in `self.arr`, and
123
+ # return the results in the same (single list) order as `self.orig_arr`.
124
+ res = [None] * self.size
125
+ cov = [False] * self.size
126
+ # orig = [None] * self.size
127
+
128
+ assert grouped_dict.keys() == self.arr.keys()
129
+
130
+ for key in grouped_dict.keys():
131
+ for (ind, _), v in zip(self.arr[key], grouped_dict[key]):
132
+ res[ind] = v
133
+ cov[ind] = True
134
+ # orig[ind] = _
135
+
136
+ assert all(cov)
137
+ # assert orig == self.orig_arr
138
+
139
+ return res
140
+
141
+
142
+ def pad_and_concat(
143
+ max_length: int,
144
+ tensors: List[torch.Tensor],
145
+ padding_side: Literal["right", "left"] = "right",
146
+ ):
147
+ """
148
+ Method for padding a list of tensors given the maximum tensor
149
+ length in the batch. Used for batching inputs and continuations in
150
+ seq2seq models.
151
+ """
152
+ assert (
153
+ padding_side == "left" or padding_side == "right"
154
+ ), f"Unrecognized padding type: '{padding_side}' not 'left' or 'right'"
155
+
156
+ for i, tensor in enumerate(tensors):
157
+ if len(tensor.shape) == 2:
158
+ tensor = tensor.squeeze(0) # squeeze, in case passed [1, seq] size
159
+ tensor_len = tensor.shape[0]
160
+ if tensor_len < max_length:
161
+ if padding_side == "right":
162
+ # right-pad
163
+ tensors[i] = torch.cat(
164
+ [
165
+ tensor, # [seq]
166
+ torch.zeros(
167
+ max_length - tensor_len,
168
+ dtype=torch.long,
169
+ device=tensor.device,
170
+ ), # [padding_length - seq]
171
+ ],
172
+ dim=0,
173
+ ).unsqueeze(0)
174
+ else:
175
+ # left-pad
176
+ tensors[i] = torch.cat(
177
+ [
178
+ torch.zeros(
179
+ max_length - tensor_len,
180
+ dtype=torch.long,
181
+ device=tensor.device,
182
+ ), # [padding_length - seq]
183
+ tensor, # [seq]
184
+ ],
185
+ dim=0,
186
+ ).unsqueeze(0)
187
+ else:
188
+ tensors[i] = tensor.unsqueeze(0)
189
+
190
+ return torch.cat(tensors, dim=0)
191
+
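For reference, a small sketch of what `pad_and_concat` does with ragged inputs (right padding with zeros, then stacking into one batch):

```
import torch

a = torch.tensor([1, 2, 3])
b = torch.tensor([4, 5])
batch = pad_and_concat(3, [a, b], padding_side="right")
print(batch)
# tensor([[1, 2, 3],
#         [4, 5, 0]])
```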
192
+
193
+ def clear_torch_cache() -> None:
194
+ gc.collect()
195
+ torch.cuda.empty_cache()
196
+
197
+
198
+ def get_dtype(dtype: Union[str, torch.dtype]) -> torch.dtype:
199
+ """Converts `dtype` from `str` to torch.dtype when possible. Does not use an instantiated HF AutoConfig"""
200
+ if isinstance(dtype, str) and dtype != "auto":
201
+ # Convert `str` args torch dtype: `float16` -> `torch.float16`
202
+ _torch_dtype = getattr(torch, dtype)
203
+ else:
204
+ _torch_dtype = dtype
205
+ return _torch_dtype
206
+
207
+
208
+ class MultiTokenEOSCriteria(transformers.StoppingCriteria):
209
+ """Criteria to stop on the specified multi-token sequence."""
210
+
211
+ def __init__(
212
+ self,
213
+ sequence: str,
214
+ tokenizer: transformers.PreTrainedTokenizer,
215
+ initial_decoder_input_length: int,
216
+ batch_size: int,
217
+ ) -> None:
218
+ self.initial_decoder_input_length = initial_decoder_input_length
219
+ self.done_tracker = [False] * batch_size
220
+ self.sequence = sequence
221
+ self.sequence_ids = tokenizer.encode(sequence, add_special_tokens=False)
222
+ # print(sequence, self.sequence_ids)
223
+ # we look back for 2 more tokens than it takes to encode our stop sequence
224
+ # because tokenizers suck, and a model might generate `['\n', '\n']` but our `sequence` is `['\n\n']`
225
+ # and we don't want to mistakenly not stop a generation because our
226
+ # (string) stop sequence was output in a different tokenization
227
+
228
+ # NOTE: there is a minor danger that this will end up looking back 2 tokens into the past, into the inputs to the model,
229
+ # and stopping generation immediately as a result. With only 2 extra tokens of lookback, this risk is minimized
230
+ # Additionally, in lookback_ids_batch we should prevent ever looking back into the inputs as described.
231
+ self.sequence_id_len = len(self.sequence_ids) + 2
232
+ self.tokenizer = tokenizer
233
+
234
+ def __call__(self, input_ids, scores, **kwargs) -> bool:
235
+ # For efficiency, we compare the last n tokens where n is the number of tokens in the stop_sequence
236
+ lookback_ids_batch = input_ids[:, self.initial_decoder_input_length :]
237
+
238
+ lookback_ids_batch = lookback_ids_batch[:, -self.sequence_id_len :]
239
+
240
+ lookback_tokens_batch = self.tokenizer.batch_decode(lookback_ids_batch)
241
+
242
+ for i, done in enumerate(self.done_tracker):
243
+ if not done:
244
+ self.done_tracker[i] = self.sequence in lookback_tokens_batch[i]
245
+ return False not in self.done_tracker
246
+
247
+
248
+ def stop_sequences_criteria(
249
+ tokenizer: transformers.PreTrainedTokenizer,
250
+ stop_sequences: List[str],
251
+ initial_decoder_input_length: int,
252
+ batch_size: int,
253
+ ) -> transformers.StoppingCriteriaList:
254
+ return transformers.StoppingCriteriaList(
255
+ [
256
+ *[
257
+ MultiTokenEOSCriteria(
258
+ sequence, tokenizer, initial_decoder_input_length, batch_size
259
+ )
260
+ for sequence in stop_sequences
261
+ ],
262
+ ]
263
+ )
264
+
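A minimal sketch of how the criteria produced by `stop_sequences_criteria` are consumed: the returned `StoppingCriteriaList` is handed directly to Hugging Face `generate`. The model name and stop strings below are illustrative only.

```
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

prompt = "Q: What is 2 + 2?\nA:"
inputs = tokenizer(prompt, return_tensors="pt")
prompt_len = inputs["input_ids"].shape[1]

# Stop as soon as any stop string appears in the newly generated text.
criteria = stop_sequences_criteria(
    tokenizer,
    stop_sequences=["\n\n", "Q:"],
    initial_decoder_input_length=prompt_len,
    batch_size=1,
)

with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=32, stopping_criteria=criteria)
print(tokenizer.decode(out[0][prompt_len:]))
```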
265
+
266
+ def undistribute(iterable):
267
+ """
268
+ Undoes https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.distribute .
269
+
270
+ Re-interleaves results that have been split using more_itertools.distribute:
271
+ >>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
272
+ >>> list(group_1)
273
+ [1, 3, 5]
274
+ >>> list(group_2)
275
+ [2, 4, 6]
276
+ >>> undistribute([group_1, group_2])
277
+ [1, 2, 3, 4, 5, 6]
278
+
279
+ Handles non-uniform component lengths:
280
+
281
+ >>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7])
282
+ >>> [list(c) for c in children]
283
+ [[1, 4, 7], [2, 5], [3, 6]]
284
+ >>> undistribute(children)
285
+ [1, 2, 3, 4, 5, 6, 7]
286
+
287
+ Also handles when some iterables are empty:
288
+
289
+ >>> children = distribute(5, [1, 2, 3])
290
+ >>> [list(c) for c in children]
291
+ [[1], [2], [3], [], []]
292
+ >>> undistribute(children)
293
+ [1, 2, 3]
294
+
295
+ """
296
+
297
+ return [
298
+ x
299
+ for x in itertools.chain.from_iterable(
300
+ itertools.zip_longest(*[list(x) for x in iterable])
301
+ )
302
+ if x is not None
303
+ ]
304
+
305
+
306
+ def retry_on_specific_exceptions(
307
+ on_exceptions: List[Type[Exception]],
308
+ max_retries: Optional[int] = None,
309
+ backoff_time: float = 3.0,
310
+ backoff_multiplier: float = 1.5,
311
+ on_exception_callback: Optional[Callable[[Exception, float], Any]] = None,
312
+ ):
313
+ """Retry on an LLM Provider's rate limit error with exponential backoff
314
+ For example, to use for OpenAI, do the following:
315
+ ```
316
+ from openai import RateLimitError
317
+
318
+ # Recommend specifying max_retries to avoid infinite loops!
319
+ @retry_on_specific_exceptions([RateLimitError], max_retries=3)
320
+ def completion(...):
321
+ # Wrap OpenAI completion function here
322
+ ...
323
+ ```
324
+ """
325
+
326
+ def decorator(func: Callable):
327
+ @wraps(func)
328
+ def wrapper(*args, **kwargs):
329
+ sleep_time = backoff_time
330
+ attempt = 0
331
+ while max_retries is None or attempt < max_retries:
332
+ try:
333
+ return func(*args, **kwargs)
334
+ except tuple(on_exceptions) as e:
335
+ if on_exception_callback is not None:
336
+ on_exception_callback(e, sleep_time)
337
+ time.sleep(sleep_time)
338
+ sleep_time *= backoff_multiplier
339
+ attempt += 1
340
+
341
+ return wrapper
342
+
343
+ return decorator
344
+
345
+
346
+ class Collator:
347
+ """
348
+ A class for reordering and batching elements of an array.
349
+
350
+ This class allows for sorting an array based on a provided sorting function, grouping elements based on a grouping function, and generating batches from the sorted and grouped data.
351
+
352
+ Objects of this class have the group_by attribute which determines the method for grouping
353
+ the data while batching it. Three options include "gen_kwargs", "contexts", or None:
354
+ If group_by == "gen_kwargs" then requests will be grouped by gen_kwargs
355
+ If group_by == "contexts" then requests will be grouped by context + cont[:-1]
356
+ If None then requests will just be reordered by length descending.
357
+ """
358
+
359
+ def __init__(
360
+ self,
361
+ arr: List,
362
+ sort_fn: Callable = lambda x: x,
363
+ group_fn: Callable = lambda x: x[1],
364
+ group_by: Union[Literal["gen_kwargs", "contexts"], None] = None,
365
+ ) -> None:
366
+ self._group_by = group_by
367
+ # 0 indices are enumerated indices. Apply functions to original arr.
368
+ self._sort_fn = lambda x: sort_fn(x[1])
369
+ self._group_fn = lambda x: group_fn(x[1])
370
+ self._reorder_indices: List = []
371
+ self._size = len(arr)
372
+ self._arr_with_indices: Union[Dict, Tuple[Tuple[int, Any], ...]] = tuple(
373
+ enumerate(arr)
374
+ ) # [indices, (arr)]
375
+ if self._group_by == "contexts":
376
+ self._group_by_context()
377
+ elif self._group_by == "gen_kwargs":
378
+ self._group_by_index()
379
+
380
+ def _group_by_index(self) -> None:
381
+ """Group the elements of a list based on their indices."""
382
+ self._arr_with_indices = self.group(
383
+ self._arr_with_indices, fn=self._group_fn, group_by="gen_kwargs"
384
+ )
385
+
386
+ def _group_by_context(self) -> None:
387
+ """Group the array with indices by context."""
388
+ self._arr_with_indices = self.group(
389
+ self._arr_with_indices, fn=self._group_fn, group_by="contexts"
390
+ )
391
+
392
+ def get_batched(self, n: int = 1, batch_fn: Optional[Callable] = None) -> Iterator:
393
+ """
394
+ Generates and yields batches from the reordered array. The method of grouping and batching
395
+ depends on the parameter `group_by`.
396
+ If `group_by` is set to "gen_kwargs", it will batch the
397
+ re-ordered values with same gen_kwargs for each batch.
398
+ If `group_by` is "contexts", it caches the requests by context before batching.
399
+ If `group_by` is neither "gen_kwargs" nor "contexts", it yields the reordered array
400
+
401
+ Parameters:
402
+ - n (int): The size of each batch. Defaults to 1.
403
+ - batch_fn ([Callable[[int, Iterable], int]] | None): A function to determine the size of
404
+ each batch. Optional, defaults to None.
405
+
406
+ Returns:
407
+ Iterator: An iterator over batches of reordered elements grouped as per the `group_by`
408
+ attribute.
409
+
410
+ Yields:
411
+ List of batched elements according to the `group_by` attribute.
412
+ """
413
+ if self._group_by == "gen_kwargs":
414
+ for (
415
+ key,
416
+ values,
417
+ ) in self._arr_with_indices.items(): # type: ignore
418
+ values = self._reorder(values)
419
+ batch = self.get_chunks(values, n=n, fn=batch_fn)
420
+ yield from batch
421
+ elif self._group_by == "contexts":
422
+ # Get one sample from each key
423
+ values = self._reorder(
424
+ [value[0] for value in self._arr_with_indices.values()]
425
+ )
426
+ batch = self.get_chunks(values, n=n, fn=batch_fn)
427
+ yield from batch
428
+ else:
429
+ values = self._reorder(self._arr_with_indices) # type: ignore
430
+ batch = self.get_chunks(values, n=n, fn=batch_fn)
431
+ yield from batch
432
+
433
+ def get_cache(
434
+ self,
435
+ req_str: Tuple[str, str] = None,
436
+ cxt_toks: List[int] = None,
437
+ cont_toks: List[int] = None,
438
+ logits: torch.Tensor = None,
439
+ ) -> Iterator[Tuple[Tuple[str, str], List[int], torch.Tensor]]:
440
+ """
441
+ Retrieves cached single-token continuations and their associated arguments, updating indices as necessary.
442
+
443
+ The behavior of this function varies depending on how the `group_by` attribute is set:
444
+
445
+ - When `group_by` is "contexts":
446
+ The function identifies single-token continuations by checking for keys that equate to
447
+ [context+continuation][-1] and logs the indices for re-ordering.
448
+ In this mode, this function can work in two scenarios:
449
+
450
+ 1. Cache Hit - Single Match:
451
+ If a single matching context-continuation pair is found in the cache,
452
+ the function yields the original arguments.
453
+
454
+ 2. Cache Hit - Multiple Matches:
455
+ If multiple matching context-continuation pairs are found in the cache,
456
+ the function expands the logits batch dimension to match the number of cache hits.
457
+ It updates the original requests and continuation tokens.
458
+
459
+ - When `group_by` is not set to "contexts":
460
+ This method yields the original arguments, logits and continuation tokens,
461
+ without checking for one-token continuations.
462
+
463
+ Parameters:
464
+ - req_str (tuple[str, str]): Original strings used for CachingLM.
465
+ - cxt_toks (list[int]): Full context tokens used for lookup.
466
+ - cont_toks (list[int]): Continuation tokens for which logits were generated.
467
+ - logits (torch.Tensor [1, seq_length, vocab_size]): Logits generated by the model given context and continuation keys.
468
+
469
+ Yields:
470
+ - Iterator:
471
+ - req_str (tuple[str, str]): strings used for CachingLM.
472
+ - cont_toks (list[int]) : continuation tokens.
473
+ - logits (torch.Tensor [1, seq_length, vocab_size]): The original logits (repeated cache hit times)
474
+ """
475
+ if self._group_by == "contexts":
476
+ cache_hit: List[
477
+ Tuple[int, Tuple[Tuple[str, str], List[int], List[int]]]
478
+ ] = self._arr_with_indices.pop(tuple(cxt_toks + cont_toks[:-1]))
479
+ if (cache_size := len(cache_hit)) == 1:
480
+ self._reorder_indices.extend(x[0] for x in cache_hit)
481
+ yield req_str, cont_toks, logits
482
+ else:
483
+ # If we have matching requests then expand the batch dimension (no-op) and
484
+ # yield each along with its corresponding args.
485
+ multilogits = logits.expand(cache_size, -1, -1).chunk(cache_size)
486
+ indices, req_str, cont_toks = zip(
487
+ *[(x[0], x[1][0], x[-1][-1]) for x in cache_hit]
488
+ )
489
+ self._reorder_indices.extend(indices)
490
+ for c_key, cont_tok, logit in zip(req_str, cont_toks, multilogits):
491
+ yield c_key, cont_tok, logit
492
+ else:
493
+ yield req_str, cont_toks, logits
494
+
495
+ def _reorder(self, arr: Union[List, Tuple[Tuple[int, Any], ...]]) -> Iterator:
496
+ """
497
+ Reorders the elements in the array based on the sorting function.
498
+
499
+ Parameters:
500
+ - arr (list | tuple[tuple[int, Any], ...]]): The array or iterable to be reordered.
501
+
502
+ Yields:
503
+ Iterator
504
+ """
505
+ arr = sorted(arr, key=self._sort_fn)
506
+ if not self._group_by == "contexts":
507
+ # If grouped by contexts then indices will be set in get_cache()
508
+ self._reorder_indices.extend([x[0] for x in arr])
509
+ yield from [x[1] for x in arr]
510
+
511
+ def get_original(self, newarr: List) -> List:
512
+ """
513
+ Restores the original order of elements from the reordered list.
514
+
515
+ Parameters:
516
+ - newarr (list): The reordered array.
517
+
518
+ Returns:
519
+ list: The array with elements restored to their original order.
520
+ """
521
+ res = [None] * self._size
522
+ cov = [False] * self._size
523
+
524
+ for ind, v in zip(self._reorder_indices, newarr):
525
+ res[ind] = v
526
+ cov[ind] = True
527
+
528
+ assert all(cov)
529
+
530
+ return res
531
+
532
+ def __len__(self):
533
+ return self._size
534
+
535
+ @staticmethod
536
+ def group(
537
+ arr: Iterable,
538
+ fn: Callable,
539
+ group_by: Literal["gen_kwargs", "contexts"] = "gen_kwargs",
540
+ ) -> dict:
541
+ """
542
+ Groups elements of an iterable based on a provided function.
543
+
544
+
545
+ The `group_by` parameter determines the method of grouping.
546
+ If `group_by` is "contexts", the elements are grouped by [context + cont][:-1].
547
+ If `group_by` is "gen_kwargs", the elements are grouped based on the gen_kwargs dict.
548
+
549
+ Parameters:
550
+ - arr (Iterable): The iterable to be grouped.
551
+ - fn (Callable): The function to determine the grouping.
552
+ - group_by ("gen_kwargs" | "contexts"): The grouping strategy. Defaults to "gen_kwargs".
553
+
554
+ Returns:
555
+ Iterator: An iterable of grouped elements.
556
+ """
557
+ res = collections.defaultdict(list)
558
+ for ob in arr:
559
+ # where ob == [context + cont]
560
+ if group_by == "contexts":
561
+ res[tuple(fn(ob))].append(ob)
562
+ else:
563
+ try:
564
+ hashable_dict = tuple(
565
+ (
566
+ key,
567
+ tuple(value)
568
+ if isinstance(value, collections.abc.Iterable)
569
+ else value,
570
+ )
571
+ for key, value in sorted(fn(ob).items())
572
+ )
573
+ res[hashable_dict].append(ob)
574
+ except (TypeError, AttributeError):
575
+ res[tuple(fn(ob))].append(ob)
576
+ return res
577
+
578
+ @staticmethod
579
+ def get_chunks(_iter, n: int = 0, fn=None):
580
+ """
581
+ Divides an iterable into chunks of specified size or based on a given function.
582
+ Useful for batching
583
+
584
+ Parameters:
585
+ - iter: The input iterable to be divided into chunks.
586
+ - n: An integer representing the size of each chunk. Default is 0.
587
+ - fn: A function that takes the current index and the iterable as arguments and returns the size of the chunk. Default is None.
588
+
589
+ Returns:
590
+ An iterator that yields chunks of the input iterable.
591
+
592
+ Example usage:
593
+ ```
594
+ data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
595
+ for chunk in chunks(data, 3):
596
+ print(chunk)
597
+ ```
598
+ Output:
599
+ ```
600
+ [1, 2, 3]
601
+ [4, 5, 6]
602
+ [7, 8, 9]
603
+ [10]
604
+ ```
605
+ """
606
+ arr = []
607
+ _iter = tuple(_iter)
608
+ for i, x in enumerate(_iter):
609
+ arr.append(x)
610
+ if len(arr) == (fn(i, _iter) if fn else n):
611
+ yield arr
612
+ arr = []
613
+
614
+ if arr:
615
+ yield arr
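To make the reorder/batch/restore cycle of `Collator` concrete, a toy sketch (no grouping; requests are plain strings sorted by descending length):

```
# Sort longest-first, process in batches of 2, then restore the original order.
reqs = ["a", "abcd", "ab", "abc"]
collator = Collator(reqs, sort_fn=lambda s: -len(s))

results = []
for batch in collator.get_batched(n=2):
    # Stand-in for model work: record the length of each request.
    results.extend(len(s) for s in batch)

print(collator.get_original(results))  # [1, 4, 2, 3], matching `reqs` order
```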
lm-evaluation/lm_eval/models/vllm_causallms.py ADDED
@@ -0,0 +1,487 @@
1
+ import copy
2
+ from importlib.metadata import version
3
+ from importlib.util import find_spec
4
+ from typing import List, Literal, Optional, Tuple, Union
5
+
6
+ from more_itertools import distribute
7
+ from packaging.version import parse as parse_version
8
+ from tqdm import tqdm
9
+
10
+ from lm_eval.api.instance import Instance
11
+ from lm_eval.api.model import TemplateLM
12
+ from lm_eval.api.registry import register_model
13
+ from lm_eval.models.utils import Collator, undistribute
14
+ from lm_eval.utils import (
15
+ eval_logger,
16
+ get_rolling_token_windows,
17
+ make_disjoint_window,
18
+ )
19
+
20
+
21
+ try:
22
+ import ray
23
+ from vllm import LLM, SamplingParams
24
+ from vllm.transformers_utils.tokenizer import get_tokenizer
25
+ except ModuleNotFoundError:
26
+ pass
27
+
28
+ eval_logger = eval_logger
29
+
30
+
31
+ @register_model("vllm")
32
+ class VLLM(TemplateLM):
33
+ _DEFAULT_MAX_LENGTH = 2048
34
+
35
+ def __init__(
36
+ self,
37
+ pretrained="gpt2",
38
+ dtype: Literal["float16", "bfloat16", "float32", "auto"] = "auto",
39
+ revision: Optional[str] = None,
40
+ trust_remote_code: Optional[bool] = False,
41
+ tokenizer: Optional[str] = None,
42
+ tokenizer_mode: Literal["auto", "slow"] = "auto",
43
+ tokenizer_revision: Optional[str] = None,
44
+ add_bos_token: Optional[bool] = False,
45
+ prefix_token_id: Optional[int] = None,
46
+ tensor_parallel_size: int = 1,
47
+ quantization: Optional[str] = None,
48
+ max_gen_toks: int = 256,
49
+ swap_space: int = 4,
50
+ batch_size: Union[str, int] = 1,
51
+ max_batch_size=None,
52
+ max_length: int = None,
53
+ max_model_len: int = None,
54
+ seed: int = 1234,
55
+ gpu_memory_utilization: float = 0.9,
56
+ device: str = "cuda",
57
+ data_parallel_size: int = 1,
58
+ **kwargs,
59
+ ):
60
+ super().__init__()
61
+
62
+ if not find_spec("vllm"):
63
+ raise Exception(
64
+ "attempted to use 'vllm' LM type, but package `vllm` is not installed. "
65
+ "Please install vllm via `pip install lm-eval[vllm]` or `pip install -e .[vllm]`"
66
+ )
67
+
68
+ assert device is None or "cuda" in device, "vLLM only supports CUDA"
69
+ assert (
70
+ max_length is None or max_model_len is None
71
+ ), "Either max_length or max_model_len may be provided, but not both"
72
+
73
+ self._max_length = max_model_len if max_model_len is not None else max_length
74
+ self.tensor_parallel_size = int(tensor_parallel_size)
75
+ self.data_parallel_size = int(data_parallel_size)
76
+ self.model_args = {
77
+ "model": pretrained,
78
+ "gpu_memory_utilization": float(gpu_memory_utilization),
79
+ "revision": revision,
80
+ "dtype": dtype,
81
+ "tokenizer": tokenizer,
82
+ "tokenizer_mode": tokenizer_mode,
83
+ "tokenizer_revision": tokenizer_revision,
84
+ "trust_remote_code": trust_remote_code,
85
+ "tensor_parallel_size": int(tensor_parallel_size),
86
+ "max_model_len": int(self._max_length) if self._max_length else None,
87
+ "swap_space": int(swap_space),
88
+ "quantization": quantization,
89
+ "seed": int(seed),
90
+ }
91
+ self.model_args.update(kwargs)
92
+ self.batch_size = (
93
+ "auto"
94
+ if isinstance(batch_size, str) and "auto" in batch_size
95
+ else batch_size
96
+ )
97
+ if self.data_parallel_size <= 1:
98
+ self.model = LLM(**self.model_args)
99
+ else:
100
+ assert parse_version(version("vllm")) < parse_version(
101
+ "0.3.3"
102
+ ), "data_parallel is only compatible with vllm < v0.3.3."
103
+ eval_logger.warning(
104
+ "You might experience occasional issues with model weight downloading when data_parallel is in use. To ensure stable performance, run with data_parallel_size=1 until the weights are downloaded and cached."
105
+ )
106
+ self.model_args["worker_use_ray"] = True
107
+ self.batch_size = "auto"
108
+ eval_logger.info("Manual batching is not compatible with data parallelism.")
109
+
110
+ from transformers import AutoConfig
111
+
112
+ self._config = AutoConfig.from_pretrained(
113
+ pretrained, trust_remote_code=trust_remote_code, revision=revision
114
+ )
115
+ self.tokenizer = get_tokenizer(
116
+ tokenizer if tokenizer else pretrained,
117
+ tokenizer_mode=tokenizer_mode,
118
+ trust_remote_code=trust_remote_code,
119
+ tokenizer_revision=tokenizer_revision,
120
+ )
121
+ self.add_bos_token = add_bos_token
122
+ self.custom_prefix_token_id = prefix_token_id
123
+ if prefix_token_id is not None:
124
+ eval_logger.info(
125
+ f"Loglikelihood prefix token id used in evaluation: {self.prefix_token_id}"
126
+ )
127
+
128
+ self._max_gen_toks = max_gen_toks
129
+
130
+ @property
131
+ def eot_token_id(self):
132
+ # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
133
+ return self.tokenizer.eos_token_id
134
+
135
+ @property
136
+ def prefix_token_id(self):
137
+ # it is used as prefix for loglikelihood
138
+ if self.custom_prefix_token_id is not None:
139
+ return self.custom_prefix_token_id
140
+ if self.tokenizer.bos_token_id is not None:
141
+ return self.tokenizer.bos_token_id
142
+ return self.tokenizer.eos_token_id
143
+
144
+ @property
145
+ def max_length(self):
146
+ if self._max_length: # if max length manually set, return it
147
+ return self._max_length
148
+ if self.data_parallel_size <= 1:
149
+ return self.model.llm_engine.model_config.max_model_len
150
+ else:
151
+ seqlen_config_attrs = ("n_positions", "max_position_embeddings", "n_ctx")
152
+ for attr in seqlen_config_attrs:
153
+ if hasattr(self._config, attr):
154
+ return getattr(self._config, attr)
155
+ if hasattr(self.tokenizer, "model_max_length"):
156
+ if self.tokenizer.model_max_length == 1000000000000000019884624838656:
157
+ return self._DEFAULT_MAX_LENGTH
158
+ return self.tokenizer.model_max_length
159
+ return self._DEFAULT_MAX_LENGTH
160
+
161
+ @property
162
+ def max_gen_toks(self):
163
+ return self._max_gen_toks
164
+
165
+ def tok_encode(
166
+ self,
167
+ string: str,
168
+ left_truncate_len=None,
169
+ add_special_tokens=None,
170
+ truncation=False,
171
+ ):
172
+ """ """
173
+ if not add_special_tokens:
174
+ add_special_tokens = False or self.add_bos_token
175
+ encoding = self.tokenizer.encode(
176
+ string, add_special_tokens=add_special_tokens, truncation=truncation
177
+ )
178
+
179
+ # left-truncate the encoded context to be at most `left_truncate_len` tokens long
180
+ if left_truncate_len:
181
+ encoding = encoding[-left_truncate_len:]
182
+
183
+ return encoding
184
+
185
+ def _model_generate(
186
+ self,
187
+ requests: List[List[int]] = None,
188
+ generate: bool = False,
189
+ max_tokens: int = None,
190
+ stop: Optional[List[str]] = None,
191
+ **kwargs,
192
+ ):
193
+ if generate:
194
+ kwargs = self.modify_gen_kwargs(kwargs)
195
+ sampling_params = SamplingParams(max_tokens=max_tokens, stop=stop, **kwargs)
196
+ else:
197
+ sampling_params = SamplingParams(
198
+ temperature=0, prompt_logprobs=1, max_tokens=1
199
+ )
200
+ if self.data_parallel_size > 1:
201
+ # vLLM hangs if tensor_parallel > 1 and resources are set in ray.remote
202
+ # also seems to only work with decorator and not with ray.remote() fn
203
+ # see https://github.com/vllm-project/vllm/issues/973
204
+ # note: this has changed on 0.3.3, and it only works now if num_gpus are set.
205
+ # but then tensor_parallel breaks
206
+ @ray.remote
207
+ def run_inference_one_model(
208
+ model_args: dict, sampling_params, requests: List[List[int]]
209
+ ):
210
+ llm = LLM(**model_args)
211
+ return llm.generate(
212
+ prompt_token_ids=requests, sampling_params=sampling_params
213
+ )
214
+
215
+ # dispatch requests to all self.data_parallel_size workers, in interleaved fashion
216
+ # interleaved important to balance context lengths across workers
217
+ requests = [list(x) for x in distribute(self.data_parallel_size, requests)]
218
+ inputs = ((self.model_args, sampling_params, req) for req in requests)
219
+ object_refs = [run_inference_one_model.remote(*x) for x in inputs]
220
+ results = ray.get(object_refs)
221
+ # Invoke ray.shutdown() to prevent hang-ups if subsequent calls required.
222
+ ray.shutdown()
223
+ # flatten results
224
+ return undistribute(results)
225
+
226
+ outputs = self.model.generate(
227
+ prompt_token_ids=requests,
228
+ sampling_params=sampling_params,
229
+ use_tqdm=True if self.batch_size == "auto" else False,
230
+ )
231
+ return outputs
232
+
233
+ def loglikelihood_rolling(
234
+ self, requests: List[Instance], disable_tqdm: bool = False
235
+ ) -> List[float]:
236
+ loglikelihoods = []
237
+
238
+ for (string,) in tqdm([req.args for req in requests], disable=disable_tqdm):
239
+ rolling_token_windows = list(
240
+ map(
241
+ make_disjoint_window,
242
+ get_rolling_token_windows(
243
+ token_list=self.tok_encode(string),
244
+ prefix_token=self.eot_token_id,
245
+ max_seq_len=self.max_length - 1,
246
+ context_len=1,
247
+ ),
248
+ )
249
+ )
250
+
251
+ rolling_token_windows = [(None,) + x for x in rolling_token_windows]
252
+
253
+ string_nll = self._loglikelihood_tokens(
254
+ rolling_token_windows,
255
+ )
256
+
257
+ # discard is_greedy
258
+ string_nll = [x[0] for x in string_nll]
259
+
260
+ string_nll = sum(string_nll)
261
+ loglikelihoods.append(string_nll)
262
+ return loglikelihoods
263
+
264
+ def generate_until(
265
+ self, requests: List[Instance], disable_tqdm: bool = False
266
+ ) -> List[str]:
267
+ res = []
268
+
269
+ # batch tokenize contexts
270
+ context, all_gen_kwargs = zip(*(req.args for req in requests))
271
+ context_encoding = self.tokenizer(context, add_special_tokens=False).input_ids
272
+ requests = [
273
+ ((a, b), c) for a, b, c in zip(context, context_encoding, all_gen_kwargs)
274
+ ]
275
+
276
+ def _collate_gen(_requests):
277
+ # the negative sign on len(toks) sorts descending - this has a few advantages:
278
+ # - time estimates will always be over not underestimates, which is more useful for planning
279
+ # - to know the size of a batch when going through the list, you know the first one is always the batch
280
+ # padded context length. this is useful to simplify the batching logic and more importantly to make
281
+ # automatic adaptive batches much much easier to implement
282
+ # - any OOMs will happen right away rather than near the end
283
+ return -len(_requests[0][1]), _requests[0][0]
284
+
285
+ # we group requests by their generation_kwargs,
286
+ # so that we don't try to execute e.g. greedy sampling and temp=0.8 sampling
287
+ # in the same batch.
288
+ re_ords = Collator(requests, _collate_gen, group_by="gen_kwargs")
289
+ chunks = re_ords.get_batched(
290
+ n=int(self.batch_size) if self.batch_size != "auto" else 0, batch_fn=None
291
+ )
292
+
293
+ pbar = tqdm(
294
+ total=len(requests),
295
+ disable=(disable_tqdm or (self.rank != 0)),
296
+ desc="Running generate_until requests",
297
+ )
298
+ # for each different set of kwargs, we execute all requests, by batch.
299
+ for chunk in chunks:
300
+ context_and_encoding, all_gen_kwargs = zip(*chunk)
301
+ context, context_encoding = zip(*context_and_encoding)
302
+ # we assume all gen kwargs in the batch are the same
303
+ # this is safe to assume because the `grouper` object ensures it.
304
+ gen_kwargs = all_gen_kwargs[0]
305
+ # unpack our keyword arguments.
306
+ until = None
307
+ if isinstance(gen_kwargs, dict):
308
+ kwargs = copy.deepcopy(gen_kwargs) # edge case for repeats > 1
309
+ if "until" in kwargs.keys():
310
+ until = kwargs.pop("until")
311
+ if isinstance(until, str):
312
+ until = [until]
313
+ elif not isinstance(until, list):
314
+ raise ValueError(
315
+ f"Expected `kwargs['until']` to be of type Union[str,list] but got {until}"
316
+ )
317
+ else:
318
+ raise ValueError(
319
+ f"Expected `kwargs` to be of type `dict` but got {gen_kwargs}"
320
+ )
321
+ # add EOS token to stop sequences
322
+ eos = self.tokenizer.decode(self.eot_token_id)
323
+ if not until:
324
+ until = [eos]
325
+ else:
326
+ until.append(eos)
327
+ if "max_gen_toks" in kwargs.keys():
328
+ max_gen_toks = kwargs.pop("max_gen_toks")
329
+ else:
330
+ max_gen_toks = self.max_gen_toks
331
+
332
+ # set the max length in tokens of inputs ("context_enc")
333
+ # max len for inputs = max length, minus room to generate the max new tokens
334
+ max_ctx_len = self.max_length - max_gen_toks
335
+ context_encoding = [x[-max_ctx_len:] for x in context_encoding]
336
+
337
+ # perform batched generation
338
+ cont = self._model_generate(
339
+ requests=context_encoding,
340
+ generate=True,
341
+ max_tokens=max_gen_toks,
342
+ stop=until,
343
+ **kwargs,
344
+ )
345
+
346
+ # cache generations
347
+ for output, context in zip(cont, context):
348
+ generated_text = output.outputs[0].text
349
+ res.append(generated_text)
350
+ self.cache_hook.add_partial(
351
+ "generate_until", (context, gen_kwargs), generated_text
352
+ )
353
+ pbar.update(1)
354
+
355
+ pbar.close()
356
+ # reorder all group of results back to original unsorted form
357
+ return re_ords.get_original(res)
358
+
359
+ def _loglikelihood_tokens(
360
+ self,
361
+ requests: List[Tuple[Tuple[str, str], List[int], List[int]]],
362
+ disable_tqdm: bool = False,
363
+ ) -> List[Tuple[float, bool]]:
364
+ res = []
365
+
366
+ def _collate(x):
367
+ toks = x[1] + x[2]
368
+ return -len(toks), tuple(toks)
369
+
370
+ # Reorder requests by length and batch
371
+ re_ord = Collator(requests, sort_fn=_collate)
372
+ chunks = re_ord.get_batched(
373
+ n=int(self.batch_size) if self.batch_size != "auto" else 0, batch_fn=None
374
+ )
375
+
376
+ pbar = tqdm(
377
+ total=len(requests),
378
+ disable=disable_tqdm,
379
+ desc="Running loglikelihood requests",
380
+ )
381
+ for chunk in chunks:
382
+ inputs = []
383
+ ctxlens = []
384
+ for cache_key, context_enc, continuation_enc in chunk:
385
+ inp = (context_enc + continuation_enc)[-(self.max_length) :]
386
+ ctxlen = len(context_enc) - max(
387
+ 0, len(context_enc) + len(continuation_enc) - (self.max_length)
388
+ )
389
+
390
+ inputs.append(inp)
391
+ ctxlens.append(ctxlen)
392
+
393
+ outputs = self._model_generate(requests=inputs, generate=False)
394
+
395
+ for output, ctxlen, (cache_key, _, _), inp in zip(
396
+ outputs, ctxlens, chunk, inputs
397
+ ):
398
+ answer = self._parse_logprobs(
399
+ tokens=inp,
400
+ outputs=output,
401
+ ctxlen=ctxlen,
402
+ )
403
+
404
+ res.append(answer)
405
+
406
+ # partial caching
407
+ if cache_key is not None:
408
+ self.cache_hook.add_partial("loglikelihood", cache_key, answer)
409
+ pbar.update(1)
410
+ pbar.close()
411
+ return re_ord.get_original(res)
412
+
413
+ @staticmethod
414
+ def _parse_logprobs(tokens: List, outputs, ctxlen: int) -> Tuple[float, bool]:
415
+ """Process logprobs and tokens.
416
+
417
+ :param tokens: list
418
+ Input tokens (potentially left-truncated)
419
+ :param outputs: RequestOutput
420
+ Contains prompt_logprobs
421
+ :param ctxlen: int
422
+ Length of context (so we can slice them away and only keep the predictions)
423
+ :return:
424
+ continuation_logprobs: float
425
+ Log probabilities of continuation tokens
426
+ is_greedy: bool
427
+ Whether argmax matches given continuation exactly
428
+ """
429
+
430
+ # The first entry of prompt_logprobs is None because the model has no previous tokens to condition on.
431
+ continuation_logprobs_dicts = outputs.prompt_logprobs
432
+
433
+ def coerce_logprob_to_num(logprob):
434
+ # vLLM changed the return type of logprobs from float
435
+ # to a Logprob object storing the float value + extra data
436
+ # (https://github.com/vllm-project/vllm/pull/3065).
437
+ # If we are dealing with vllm's Logprob object, return
438
+ # the logprob value stored as an attribute. Otherwise,
439
+ # return the object itself (which should be a float
440
+ # for older versions of vLLM).
441
+ return getattr(logprob, "logprob", logprob)
442
+
443
+ continuation_logprobs_dicts = [
444
+ {
445
+ token: coerce_logprob_to_num(logprob)
446
+ for token, logprob in logprob_dict.items()
447
+ }
448
+ if logprob_dict is not None
449
+ else None
450
+ for logprob_dict in continuation_logprobs_dicts
451
+ ]
452
+
453
+ # Calculate continuation_logprobs
454
+ # assume ctxlen always >= 1
455
+ continuation_logprobs = sum(
456
+ logprob_dict.get(token)
457
+ for token, logprob_dict in zip(
458
+ tokens[ctxlen:], continuation_logprobs_dicts[ctxlen:]
459
+ )
460
+ )
461
+
462
+ # Determine if is_greedy
463
+ is_greedy = True
464
+ for token, logprob_dict in zip(
465
+ tokens[ctxlen:], continuation_logprobs_dicts[ctxlen:]
466
+ ):
467
+ # Get the token with the maximum log probability from the logprob_dict
468
+ if logprob_dict: # Ensure the logprob_dict is not None
469
+ top_token = max(logprob_dict, key=logprob_dict.get)
470
+ if top_token != token:
471
+ is_greedy = False
472
+ break
473
+
474
+ return continuation_logprobs, is_greedy
475
+
476
+ @staticmethod
477
+ def modify_gen_kwargs(kwargs: dict) -> dict:
478
+ # sampling_params
479
+ do_sample = kwargs.pop("do_sample", None)
480
+ if do_sample is False or "temperature" not in kwargs:
481
+ kwargs["temperature"] = 0.0
482
+ # hf defaults
483
+ kwargs["skip_special_tokens"] = kwargs.get("skip_special_tokens", False)
484
+ kwargs["spaces_between_special_tokens"] = kwargs.get(
485
+ "spaces_between_special_tokens", False
486
+ )
487
+ return kwargs
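To illustrate the bookkeeping in `_parse_logprobs`, a toy walk-through with a faked output object standing in for vLLM's `RequestOutput` (all numbers invented):

```
from types import SimpleNamespace

# Prompt of 3 token ids [3, 5, 7]; the first 2 are context, the last is the continuation.
# prompt_logprobs[i] maps candidate token ids to log-probs at position i; entry 0 is None
# because there is nothing to condition on.
fake_output = SimpleNamespace(
    prompt_logprobs=[
        None,
        {5: -0.1, 7: -2.3},
        {7: -0.4, 9: -1.6},  # continuation position: argmax token is 7, the true token
    ]
)

logprob, is_greedy = VLLM._parse_logprobs(
    tokens=[3, 5, 7], outputs=fake_output, ctxlen=2
)
print(logprob, is_greedy)  # -0.4 True
```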
lm-evaluation/lm_eval/prompts/__init__.py ADDED
@@ -0,0 +1,126 @@
1
+ import ast
2
+ import os
3
+ from typing import Dict
4
+
5
+ from lm_eval import utils
6
+ from lm_eval.utils import eval_logger
7
+
8
+
9
+ # Prompt library.
10
+ # Stores prompts in a dictionary indexed by 2 levels:
11
+ # prompt category name, and prompt name.
12
+ # This allows us to access prompts
13
+ PROMPT_REGISTRY: Dict[str, Dict[str, str]] = {
14
+ "qa-basic": {
15
+ "question-newline-answer": "Question: {{question}}\nAnswer:",
16
+ "q-newline-a": "Q: {{question}}\nA:",
17
+ },
18
+ }
19
+
20
+
21
+ def get_prompt(prompt_id: str, dataset_name: str = None, subset_name: str = None):
22
+ # unpack prompt name
23
+ category_name, prompt_name = prompt_id.split(":")
24
+ if subset_name is None:
25
+ dataset_full_name = dataset_name
26
+ else:
27
+ dataset_full_name = f"{dataset_name}-{subset_name}"
28
+ eval_logger.info(f"Loading prompt from {category_name} for {dataset_full_name}")
29
+ if category_name == "promptsource":
30
+ try:
31
+ from promptsource.templates import DatasetTemplates
32
+ except ModuleNotFoundError:
33
+ raise Exception(
34
+ "Tried to load a Promptsource template, but promptsource is not installed ",
35
+ "please install promptsource via pip install lm-eval[promptsource] or pip install -e .[promptsource]",
36
+ )
37
+ try:
38
+ if subset_name is None:
39
+ prompts = DatasetTemplates(dataset_name=dataset_name)
40
+ else:
41
+ prompts = DatasetTemplates(
42
+ dataset_name=dataset_name, subset_name=subset_name
43
+ )
44
+ except Exception:
45
+ raise ValueError(f"{dataset_name} and {subset_name} not found")
46
+ if prompt_name in prompts.all_template_names:
47
+ return prompts[prompt_name]
48
+ else:
49
+ raise ValueError(
50
+ f"{prompt_name} not in prompt list {prompts.all_template_names}"
51
+ )
52
+ elif ".yaml" in category_name:
53
+ import yaml
54
+
55
+ with open(category_name, "rb") as file:
56
+ prompt_yaml_file = yaml.full_load(file)
57
+
58
+ prompt_string = prompt_yaml_file["prompts"][prompt_name]
59
+ return PromptString(prompt_string)
60
+ else:
61
+ try:
62
+ return PROMPT_REGISTRY[category_name][prompt_name]
63
+ except Exception:
64
+ raise ValueError(
65
+ f"expected only a single `:` as separator between \
66
+ prompt category and name, but got `{prompt_id}` instead"
67
+ )
68
+
69
+
70
+ def load_prompt_list(
71
+ use_prompt: str, dataset_name=None, subset_name=None, yaml_path=None, **kwargs
72
+ ):
73
+ category_name, prompt_name = use_prompt.split(":")
74
+
75
+ if category_name == "promptsource":
76
+ from promptsource.templates import DatasetTemplates
77
+
78
+ if subset_name is None:
79
+ prompts = DatasetTemplates(dataset_name=dataset_name)
80
+ else:
81
+ prompts = DatasetTemplates(
82
+ dataset_name=dataset_name, subset_name=subset_name
83
+ )
84
+
85
+ prompt_list = utils.pattern_match(prompt_name, prompts.all_template_names)
86
+
87
+ elif ".yaml" in category_name:
88
+ import yaml
89
+
90
+ if yaml_path is not None:
91
+ category_name = os.path.realpath(os.path.join(yaml_path, category_name))
92
+
93
+ with open(category_name, "rb") as file:
94
+ prompt_yaml_file = yaml.full_load(file)
95
+
96
+ prompt_list = utils.pattern_match(
97
+ prompt_name, prompt_yaml_file["prompts"].keys()
98
+ )
99
+
100
+ # category_name, *prompt_name = use_prompt.split(":")
101
+ # TODO allow to multiple prompt naming
102
+ # if len(prompt_name) > 1:
103
+ # prompt_list = []
104
+ # for prompt in prompt_name:
105
+ # prompt_list.append(utils.pattern_match(prompt_name, prompts.all_template_names))
106
+ # else:
107
+ # prompt_list = utils.pattern_match(prompt_name, prompts.all_template_names)
108
+ return [":".join([category_name, prompt]) for prompt in prompt_list]
109
+
110
+
111
+ class PromptString:
112
+ def __init__(self, prompt_string):
113
+ self.prompt_string = prompt_string
114
+
115
+ def apply(self, doc):
116
+ doc_to_text = self.prompt_string["doc_to_text"]
117
+ doc_to_target = self.prompt_string["doc_to_target"]
118
+
119
+ # TODO need a way to process doc_to_choice
120
+ if "doc_to_choice" in self.prompt_string:
121
+ raise Exception("Not yet implemented to accept doc_to_choice")
122
+
123
+ text_string = utils.apply_template(doc_to_text, doc)
124
+ target_string = utils.apply_template(doc_to_target, doc)
125
+
126
+ return [text_string, target_string]
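A short sketch of the `.yaml` branch of `get_prompt` above: a file with a top-level `prompts` mapping is addressed as `<path>.yaml:<prompt name>`. The file name and field values here are hypothetical.

```
# Contents of a hypothetical my_prompts.yaml:
#
# prompts:
#   qa:
#     doc_to_text: "Question: {{question}}\nAnswer:"
#     doc_to_target: "{{answer}}"

from lm_eval.prompts import get_prompt

prompt = get_prompt("my_prompts.yaml:qa")
print(prompt.apply({"question": "What is 2 + 2?", "answer": "4"}))
# -> ["Question: What is 2 + 2?\nAnswer:", "4"]
```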
lm-evaluation/lm_eval/prompts/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.22 kB).
 
lm-evaluation/tests/__init__.py ADDED
File without changes
lm-evaluation/tests/models/test_gguf.py ADDED
@@ -0,0 +1,152 @@
1
+ import hashlib
2
+ import json
3
+ import os
4
+ import pickle
5
+ import unittest
6
+ from unittest.mock import patch
7
+
8
+ from lm_eval.api.instance import Instance
9
+ from lm_eval.models.gguf import GGUFLM
10
+
11
+
12
+ base_url = "https://matthoffner-ggml-llm-api.hf.space"
13
+
14
+
15
+ def gguf_completion_mock(base_url=None, **kwargs):
16
+ # Generate a hash from the parameters
17
+ hash_kwargs = {"base_url": base_url, **kwargs}
18
+ hash = hashlib.sha256(
19
+ json.dumps(hash_kwargs, sort_keys=True).encode("utf-8")
20
+ ).hexdigest()
21
+
22
+ fname = f"./tests/testdata/gguf_test_{hash}.pkl"
23
+
24
+ if os.path.exists(fname):
25
+ with open(fname, "rb") as fh:
26
+ return pickle.load(fh)
27
+ else:
28
+ print("The file does not exist, attempting to write...")
29
+ if "stop" in kwargs:
30
+ result = {
31
+ "choices": [
32
+ {
33
+ "text": f"generated text until {kwargs['stop']}",
34
+ "logprobs": {"token_logprobs": [-1.2345], "text_offset": 0},
35
+ "finish_reason": "length",
36
+ }
37
+ ]
38
+ }
39
+ else:
40
+ # generated with # curl -X 'POST' 'http://localhost:8000/v1/completions' -H 'accept: application/json' -H 'Content-Type: application/json' -d '{"prompt": "string", "logprobs": 10, "temperature": 0.0, "max_tokens": 1, "echo": true}'
41
+ result = {
42
+ "id": "cmpl-4023976b-bc6a-43b0-a5a9-629f4216c7f3",
43
+ "object": "text_completion",
44
+ "created": 1700511361,
45
+ "model": "../llama-2-7b.Q8_0.gguf",
46
+ "choices": [
47
+ {
48
+ "text": "string(",
49
+ "index": 0,
50
+ "logprobs": {
51
+ "text_offset": [0, 7],
52
+ "token_logprobs": [None, -1.033263319857306],
53
+ "tokens": [" string", "("],
54
+ "top_logprobs": [
55
+ None,
56
+ {
57
+ "(": -1.033263319857306,
58
+ "[]": -2.6530743779017394,
59
+ ".": -3.0377145947291324,
60
+ "\n": -3.0399156750513976,
61
+ "_": -3.510376089937872,
62
+ " =": -3.6957918347193663,
63
+ ",": -3.9309459866358702,
64
+ " of": -4.2834550083949035,
65
+ '("': -4.322762841112799,
66
+ "()": -4.426229113466925,
67
+ },
68
+ ],
69
+ },
70
+ "finish_reason": "length",
71
+ }
72
+ ],
73
+ "usage": {
74
+ "prompt_tokens": 2,
75
+ "completion_tokens": 1,
76
+ "total_tokens": 3,
77
+ },
78
+ }
79
+
80
+ try:
81
+ os.makedirs(os.path.dirname(fname), exist_ok=True)
82
+ print("Writing file at", fname)
83
+ with open(fname, "wb") as fh:
84
+ pickle.dump(result, fh)
85
+ print("File written successfully")
86
+ except Exception as e:
87
+ print("File writing failed:", e)
88
+
89
+ return result
90
+
91
+
92
+ class GGUFLMTest(unittest.TestCase):
93
+ @patch(
94
+ "lm_eval.models.gguf.GGUFLM.gguf_completion", side_effect=gguf_completion_mock
95
+ )
96
+ def test_loglikelihood(self, gguf_completion_mock):
97
+ lm = GGUFLM(base_url)
98
+
99
+ # Test loglikelihood
100
+ requests = [
101
+ Instance(
102
+ request_type="loglikelihood",
103
+ doc=args,
104
+ arguments=args,
105
+ idx=i,
106
+ )
107
+ for i, args in enumerate([("str", "ing"), ("str", "ing")])
108
+ ]
109
+ res = lm.loglikelihood(requests)
110
+
111
+ # Assert the loglikelihood response is correct
112
+ expected_res = [(logprob, True) for logprob in [0, 0]]
113
+ self.assertEqual(res, expected_res)
114
+
115
+ @patch(
116
+ "lm_eval.models.gguf.GGUFLM.gguf_completion", side_effect=gguf_completion_mock
117
+ )
118
+ def test_generate_until(self, gguf_completion_mock):
119
+ lm = GGUFLM(base_url)
120
+
121
+ # Test generate_until
122
+ requests = [
123
+ Instance(
124
+ request_type="generate_until",
125
+ doc={"input": doc},
126
+ arguments=(doc, {"until": stop}),
127
+ idx=i,
128
+ )
129
+ for i, (doc, stop) in enumerate([("input1", "stop1"), ("input2", "stop2")])
130
+ ]
131
+
132
+ res = lm.generate_until(requests)
133
+
134
+ # Assert the generate_until response is correct
135
+ expected_res = ["generated text until stop1", "generated text until stop2"]
136
+ self.assertEqual(res, expected_res)
137
+
138
+ # @patch('lm_eval.models.gguf.GGUFLM.gguf_completion', side_effect=gguf_completion_mock)
139
+ # def test_loglikelihood_rolling(self, gguf_completion_mock):
140
+ # lm = GGUFLM(base_url)
141
+
142
+ # # Test loglikelihood_rolling
143
+ # requests = ["input1", "input2"]
144
+ # res = lm.loglikelihood_rolling(requests)
145
+
146
+ # # Assert the loglikelihood_rolling response is correct
147
+ # expected_res = [(-1.2345, True), (-1.2345, True)]
148
+ # self.assertEqual(res, expected_res)
149
+
150
+
151
+ if __name__ == "__main__":
152
+ unittest.main()
lm-evaluation/tests/models/test_huggingface.py ADDED
@@ -0,0 +1,143 @@
1
+ from __future__ import annotations
2
+
3
+ import sys
4
+ from pathlib import Path
5
+
6
+ import numpy as np
7
+ import torch
8
+
9
+ import lm_eval.tasks as tasks
10
+ from lm_eval.api.instance import Instance
11
+ from lm_eval.models.huggingface import HFLM
12
+
13
+
14
+ task_manager = tasks.TaskManager()
15
+
16
+
17
+ class Test_HFLM:
18
+ torch.use_deterministic_algorithms(True)
19
+ task_list = task_manager.load_task_or_group(["arc_easy", "gsm8k", "wikitext"])
20
+ version_minor = sys.version_info.minor
21
+ multiple_choice_task = task_list["arc_easy"] # type: ignore
22
+ multiple_choice_task.build_all_requests(limit=10, rank=0, world_size=1)
23
+ MULTIPLE_CH: list[Instance] = multiple_choice_task.instances
24
+ generate_until_task = task_list["gsm8k"] # type: ignore
25
+ generate_until_task._config.generation_kwargs["max_gen_toks"] = 10
26
+ generate_until_task.build_all_requests(limit=10, rank=0, world_size=1)
27
+ generate_until: list[Instance] = generate_until_task.instances
28
+ rolling_task = task_list["wikitext"] # type: ignore
29
+ rolling_task.build_all_requests(limit=10, rank=0, world_size=1)
30
+ ROLLING: list[Instance] = rolling_task.instances
31
+
32
+ MULTIPLE_CH_RES = [
33
+ -41.902435302734375,
34
+ -42.939308166503906,
35
+ -33.914180755615234,
36
+ -37.07139205932617,
37
+ -22.95258331298828,
38
+ -20.342208862304688,
39
+ -14.818366050720215,
40
+ -27.942853927612305,
41
+ -15.80704116821289,
42
+ -15.936427116394043,
43
+ -13.052018165588379,
44
+ -18.04828453063965,
45
+ -13.345029830932617,
46
+ -13.366025924682617,
47
+ -12.127134323120117,
48
+ -11.872495651245117,
49
+ -47.10598373413086,
50
+ -47.76410675048828,
51
+ -36.4406852722168,
52
+ -50.0289421081543,
53
+ -16.72093963623047,
54
+ -18.535587310791016,
55
+ -26.46993637084961,
56
+ -20.355995178222656,
57
+ -17.757919311523438,
58
+ -21.80595588684082,
59
+ -33.1990852355957,
60
+ -39.28636932373047,
61
+ -14.759679794311523,
62
+ -16.753942489624023,
63
+ -11.486852645874023,
64
+ -15.42177677154541,
65
+ -13.15798282623291,
66
+ -15.887393951416016,
67
+ -15.28614616394043,
68
+ -12.339089393615723,
69
+ -44.59441375732422,
70
+ -55.40888214111328,
71
+ -52.70050811767578,
72
+ -56.25089645385742,
73
+ ]
74
+ generate_until_RES = [
75
+ " The average of $2.50 each is $",
76
+ " A robe takes 2 bolts of blue fiber and half",
77
+ " $50,000 in repairs.\n\nQuestion",
78
+ " He runs 1 sprint 3 times a week.",
79
+ " They feed each of her chickens three cups of mixed",
80
+ " The price of the glasses is $5, but",
81
+ " The total percentage of students who said they like to",
82
+ " Carla is downloading a 200 GB file. Normally",
83
+ " John drives for 3 hours at a speed of 60",
84
+ " Eliza sells 4 tickets to 5 friends so she",
85
+ ]
86
+ ROLLING_RES = [
87
+ -3603.6328125,
88
+ -19779.23974609375,
89
+ -8834.16455078125,
90
+ -27967.591796875,
91
+ -7636.794982910156,
92
+ -9491.93505859375,
93
+ -41043.4248046875,
94
+ -8397.689819335938,
95
+ -45969.47155761719,
96
+ -7158.90625,
97
+ ]
98
+ LM = HFLM(pretrained="EleutherAI/pythia-70m", device="cpu", dtype="float32")
99
+
100
+ def test_logliklihood(self) -> None:
101
+ res = self.LM.loglikelihood(self.MULTIPLE_CH)
102
+ _RES, _res = self.MULTIPLE_CH_RES, [r[0] for r in res]
103
+ # log samples to CI
104
+ dir_path = Path("test_logs")
105
+ dir_path.mkdir(parents=True, exist_ok=True)
106
+
107
+ file_path = dir_path / f"outputs_log_{self.version_minor}.txt"
108
+ file_path = file_path.resolve()
109
+ with open(file_path, "w") as f:
110
+ f.write("\n".join(str(x) for x in _res))
111
+ assert np.allclose(_res, _RES, atol=1e-2)
112
+ # check indices for Multiple Choice
113
+ argmax_RES, argmax_res = (
114
+ np.argmax(np.array(_RES).reshape(-1, 4), axis=1),
115
+ np.argmax(np.array(_res).reshape(-1, 4), axis=1),
116
+ )
117
+ assert (argmax_RES == argmax_res).all()
118
+
119
+ def test_generate_until(self) -> None:
120
+ res = self.LM.generate_until(self.generate_until)
121
+ assert res == self.generate_until_RES
122
+
123
+ def test_logliklihood_rolling(self) -> None:
124
+ res = self.LM.loglikelihood_rolling(self.ROLLING)
125
+ assert np.allclose(res, self.ROLLING_RES, atol=1e-1)
126
+
127
+ def test_toc_encode(self) -> None:
128
+ res = self.LM.tok_encode("foo bar")
129
+ assert res == [12110, 2534]
130
+
131
+ def test_toc_decode(self) -> None:
132
+ res = self.LM.tok_decode([12110, 2534])
133
+ assert res == "foo bar"
134
+
135
+ def test_batch_encode(self) -> None:
136
+ res = self.LM.tok_batch_encode(["foo bar", "bar foo"])[0].tolist()
137
+ assert res == [[12110, 2534], [2009, 17374]]
138
+
139
+ def test_model_generate(self) -> None:
140
+ context = self.LM.tok_batch_encode(["foo bar"])[0]
141
+ res = self.LM._model_generate(context, max_length=10, stop=["\n\n"])
142
+ res = self.LM.tok_decode(res[0])
143
+ assert res == "foo bar\n<bazhang>!info bar"
lm-evaluation/tests/models/test_neuron_optimum.py ADDED
@@ -0,0 +1,26 @@
1
+ import pytest
2
+ import torch
3
+
4
+ from lm_eval.models.neuron_optimum import wrap_constant_batch_size
5
+
6
+
7
+ def test_wrap_constant_batch_size():
8
+ class Tester:
9
+ def __init__(self, batch_size):
10
+ self.batch_size = batch_size
11
+
12
+ @wrap_constant_batch_size
13
+ def test_constant_batch_size(self, inputs):
14
+ assert len(inputs) == self.batch_size
15
+ return inputs
16
+
17
+ batch_size_test = 8
18
+ for i in range(1, batch_size_test + 1):
19
+ tensor = torch.ones([i, 2, 2])
20
+ out = Tester(batch_size=batch_size_test).test_constant_batch_size(tensor)
21
+ torch.testing.assert_allclose(out, tensor)
22
+
23
+ with pytest.raises(ValueError):
24
+ Tester(batch_size=batch_size_test).test_constant_batch_size(
25
+ torch.ones([batch_size_test + 1, 2, 2])
26
+ )
lm-evaluation/tests/models/test_openvino.py ADDED
@@ -0,0 +1,73 @@
1
+ import random
2
+ import tempfile
3
+
4
+ import pytest
5
+ from optimum.intel import OVModelForCausalLM
6
+ from transformers import AutoTokenizer
7
+
8
+ import lm_eval.evaluator as evaluator
9
+ from lm_eval.api.registry import get_model
10
+
11
+
12
+ SUPPORTED_ARCHITECTURES_TASKS = {
13
+ "facebook/opt-125m": "lambada_openai",
14
+ "hf-internal-testing/tiny-random-gpt2": "wikitext",
15
+ }
16
+
17
+
18
+ @pytest.mark.parametrize("model_id,task", SUPPORTED_ARCHITECTURES_TASKS.items())
19
+ def test_evaluator(model_id, task):
20
+ with tempfile.TemporaryDirectory() as tmpdirname:
21
+ model = OVModelForCausalLM.from_pretrained(
22
+ model_id, export=True, use_cache=True
23
+ )
24
+ model.save_pretrained(tmpdirname)
25
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
26
+ tokenizer.save_pretrained(tmpdirname)
27
+
28
+ lm = get_model("openvino").create_from_arg_string(
29
+ f"pretrained={tmpdirname}",
30
+ {
31
+ "batch_size": 1,
32
+ "device": "cpu",
33
+ },
34
+ )
35
+
36
+ def ll_fn(reqs):
37
+ for ctx, cont in [req.args for req in reqs]:
38
+ if len(ctx) == 0:
39
+ continue
40
+ # space convention
41
+ assert ctx[-1] != " "
42
+ assert cont[0] == " " or ctx[-1] == "\n"
43
+
44
+ res = []
45
+
46
+ random.seed(42)
47
+ for _ in reqs:
48
+ res.append((-random.random(), False))
49
+
50
+ return res
51
+
52
+ def ll_perp_fn(reqs):
53
+ for (string,) in [req.args for req in reqs]:
54
+ assert isinstance(string, str)
55
+
56
+ res = []
57
+ random.seed(42)
58
+ for _ in reqs:
59
+ res.append(-random.random())
60
+
61
+ return res
62
+
63
+ lm.loglikelihood = ll_fn
64
+ lm.loglikelihood_rolling = ll_perp_fn
65
+
66
+ limit = 10
67
+ evaluator.simple_evaluate(
68
+ model=lm,
69
+ tasks=[task],
70
+ num_fewshot=0,
71
+ limit=limit,
72
+ bootstrap_iters=10,
73
+ )
lm-evaluation/tests/models/test_vllm.py ADDED
@@ -0,0 +1,51 @@
1
+ from typing import List
2
+
3
+ import pytest
4
+ import torch
5
+
6
+ import lm_eval.tasks as tasks
7
+ from lm_eval.api.instance import Instance
8
+
9
+
10
+ task_manager = tasks.TaskManager()
11
+
12
+
13
+ @pytest.mark.skip(reason="requires CUDA")
14
+ class TEST_VLLM:
15
+ vllm = pytest.importorskip("vllm")
16
+ try:
17
+ from lm_eval.models.vllm_causallms import VLLM
18
+
19
+ LM = VLLM(pretrained="EleutherAI/pythia-70m")
20
+ except ModuleNotFoundError:
21
+ pass
22
+ torch.use_deterministic_algorithms(True)
23
+ task_list = task_manager.load_task_or_group(["arc_easy", "gsm8k", "wikitext"])
24
+ multiple_choice_task = task_list["arc_easy"] # type: ignore
25
+ multiple_choice_task.build_all_requests(limit=10, rank=0, world_size=1)
26
+ MULTIPLE_CH: List[Instance] = multiple_choice_task.instances
27
+ generate_until_task = task_list["gsm8k"] # type: ignore
28
+ generate_until_task._config.generation_kwargs["max_gen_toks"] = 10
29
+ generate_until_task.build_all_requests(limit=10, rank=0, world_size=1)
30
+ generate_until: List[Instance] = generate_until_task.instances
31
+ rolling_task = task_list["wikitext"] # type: ignore
32
+ rolling_task.build_all_requests(limit=10, rank=0, world_size=1)
33
+ ROLLING: List[Instance] = rolling_task.instances
34
+
35
+ # TODO: make proper tests
36
+ def test_loglikelihood(self) -> None:
37
+ res = self.LM.loglikelihood(self.MULTIPLE_CH)
38
+ assert len(res) == len(self.MULTIPLE_CH)
39
+ for x in res:
40
+ assert isinstance(x[0], float)
41
+
42
+ def test_generate_until(self) -> None:
43
+ res = self.LM.generate_until(self.generate_until)
44
+ assert len(res) == len(self.generate_until)
45
+ for x in res:
46
+ assert isinstance(x, str)
47
+
48
+ def test_loglikelihood_rolling(self) -> None:
49
+ res = self.LM.loglikelihood_rolling(self.ROLLING)
50
+ for x in res:
51
+ assert isinstance(x, float)
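The class is skipped unless CUDA (and vLLM) are available; its assertions encode the return-type contract of the three request types: loglikelihood yields (logprob, is_greedy) pairs, generate_until yields strings, and loglikelihood_rolling yields floats. A minimal sketch of driving the wrapper directly under the same assumptions as the class body above:

import lm_eval.tasks as tasks
from lm_eval.models.vllm_causallms import VLLM

lm = VLLM(pretrained="EleutherAI/pythia-70m")                # requires vLLM + CUDA
task = tasks.TaskManager().load_task_or_group(["arc_easy"])["arc_easy"]
task.build_all_requests(limit=5, rank=0, world_size=1)
scores = lm.loglikelihood(task.instances)                    # list of (logprob, is_greedy)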
lm-evaluation/tests/test_cli.py ADDED
@@ -0,0 +1,43 @@
1
+ import argparse
2
+
3
+ import pytest
4
+
5
+ import lm_eval.__main__
6
+
7
+
8
+ def test_cli_parse_error():
9
+ """
10
+ Assert that an error is raised if a CLI argument doesn't specify a type
11
+ """
12
+ with pytest.raises(ValueError):
13
+ parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
14
+ parser.add_argument(
15
+ "--model", "-m", type=str, default="hf", help="Name of model e.g. `hf`"
16
+ )
17
+ parser.add_argument(
18
+ "--tasks",
19
+ "-t",
20
+ default=None,
21
+ metavar="task1,task2",
22
+ help="To get full list of tasks, use the command lm-eval --tasks list",
23
+ )
24
+ lm_eval.__main__.check_argument_types(parser)
25
+
26
+
27
+ def test_cli_parse_no_error():
28
+ """
29
+ Assert typed arguments are parsed correctly
30
+ """
31
+ parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
32
+ parser.add_argument(
33
+ "--model", "-m", type=str, default="hf", help="Name of model e.g. `hf`"
34
+ )
35
+ parser.add_argument(
36
+ "--tasks",
37
+ "-t",
38
+ type=str,
39
+ default=None,
40
+ metavar="task1,task2",
41
+ help="To get full list of tasks, use the command lm-eval --tasks list",
42
+ )
43
+ lm_eval.__main__.check_argument_types(parser)
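Both tests hinge on `check_argument_types` rejecting parsers whose non-flag arguments omit `type=`. A rough sketch of that behaviour, for orientation only (this is not the actual `lm_eval.__main__` implementation):

import argparse

def check_argument_types_sketch(parser: argparse.ArgumentParser) -> None:
    # Raise if any non-flag argument was added without an explicit type.
    for action in parser._actions:
        if action.dest == "help" or action.const is not None:
            continue  # skip --help and store_true/store_const style flags
        if action.type is None:
            raise ValueError(f"Argument '{action.dest}' doesn't have a type specified.")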
lm-evaluation/tests/test_evaluator.py ADDED
@@ -0,0 +1,65 @@
1
+ # import lm_eval.base as base
2
+ from typing import List
3
+
4
+ import pytest
5
+
6
+ # import lm_eval.models as models
7
+ import lm_eval.api as api
8
+ import lm_eval.evaluator as evaluator
9
+ from lm_eval import tasks
10
+
11
+
12
+ # TODO: more fine grained unit tests rather than this big honking integration
13
+ # test once we break evaluator into smaller, more manageable pieces
14
+
15
+
16
+ @pytest.mark.parametrize(
17
+ "task_name,limit,model,model_args",
18
+ [
19
+ (
20
+ ["arc_easy"],
21
+ 10,
22
+ "hf",
23
+ "pretrained=EleutherAI/pythia-160m,dtype=float32,device=cpu",
24
+ )
25
+ ],
26
+ )
27
+ def test_evaluator(task_name: List[str], limit: int, model: str, model_args: str):
28
+ task_name = task_name
29
+ limit = 10
30
+
31
+ e1 = evaluator.simple_evaluate(
32
+ model=model,
33
+ tasks=task_name,
34
+ limit=limit,
35
+ model_args=model_args,
36
+ )
37
+ assert e1 is not None
38
+
39
+ lm = api.registry.get_model(model).create_from_arg_string(
40
+ model_args,
41
+ {
42
+ "batch_size": None,
43
+ "max_batch_size": None,
44
+ "device": None,
45
+ },
46
+ )
47
+ task_manager = tasks.TaskManager()
48
+ task_dict = tasks.get_task_dict(task_name, task_manager)
49
+
50
+ e2 = evaluator.evaluate(
51
+ lm=lm,
52
+ task_dict=task_dict,
53
+ limit=limit,
54
+ )
55
+
56
+ assert e2 is not None
57
+ # check that caching is working
58
+
59
+ def r(x):
60
+ return x["results"]["arc_easy"]
61
+
62
+ assert all(
63
+ x == y
64
+ for x, y in zip([y for _, y in r(e1).items()], [y for _, y in r(e2).items()])
65
+ )
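The final assertion compares the metric values produced by `simple_evaluate` and `evaluate` for exact equality, which works here because both runs go through the same request cache. Should exact float comparison ever prove brittle, a tolerant variant could look like this (a sketch; assumes both result dicts share the same metric keys):

import pytest

def assert_results_close(e1, e2, task="arc_easy"):
    r1, r2 = e1["results"][task], e2["results"][task]
    assert r1.keys() == r2.keys()
    for key, value in r1.items():
        if isinstance(value, float):
            assert value == pytest.approx(r2[key])
        else:
            assert value == r2[key]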
lm-evaluation/tests/test_janitor.py ADDED
@@ -0,0 +1,507 @@
1
+ from collections import defaultdict
2
+
3
+ from lm_eval.decontamination.janitor import (
4
+ Janitor,
5
+ form_ngrams,
6
+ split_indices,
7
+ word_ngrams,
8
+ word_ngrams_indices,
9
+ )
10
+
11
+
12
+ def simple_ngram(sequence, n):
13
+ ngrams = list()
14
+ ngram = []
15
+ for x in sequence:
16
+ ngram.append(x)
17
+ if len(ngram) == n:
18
+ ngrams.append(tuple(ngram))
19
+ ngram = ngram[1:]
20
+
21
+ return ngrams
22
+
23
+
24
+ def test_form_ngrams():
25
+ sequence = (
26
+ "Hello my name is Bob, I like eating pizza, chicken, chips and ice cream. Maybe I should eat some"
27
+ " more salad but it's so booooring. I just... like eating pizza, chicken, chips and ice cream so much."
28
+ )
29
+
30
+ n_values = [1, 2, 3, 5, 13]
31
+ for n in n_values:
32
+ comparison = simple_ngram(sequence, n)
33
+ result_to_test = list(form_ngrams(iter(sequence), n))
34
+ assert len(comparison) == len(result_to_test)
35
+ assert comparison == result_to_test
36
+
37
+
38
+ def test_word_ngrams():
39
+ sequence = (
40
+ "Hello my name is Bob, I like eating pizza, chicken, chips and ice cream. Maybe I should eat some"
41
+ " more salad but it's so booooring. I just... like eating pizza, chicken, chips and ice cream so much."
42
+ )
43
+
44
+ words = sequence.split()
45
+
46
+ n_values = [1, 2, 3, 5, 13]
47
+ for n in n_values:
48
+ comparison = simple_ngram(words, n)
49
+ comparison = [" ".join(ngram) for ngram in comparison]
50
+ result_to_test = list(word_ngrams(sequence, n))
51
+ assert len(comparison) == len(result_to_test)
52
+ assert result_to_test == comparison
53
+
54
+
55
+ def test_split_indices():
56
+ sequence = (
57
+ "Hello my name is Bob, I like eating pizza, chicken, chips and ice cream. Maybe I should eat some"
58
+ " more salad but it's so booooring. I just... like eating pizza, chicken, chips and ice cream so much."
59
+ )
60
+
61
+ comparison = []
62
+ current_word = ""
63
+ for i, c in enumerate(sequence):
64
+ if c != " ":
65
+ current_word += c
66
+ else:
67
+ if current_word:
68
+ comparison.append((current_word, (i - len(current_word), i - 1)))
69
+ current_word = ""
70
+
71
+ if current_word:
72
+ comparison.append(
73
+ (current_word, (len(sequence) - len(current_word), len(sequence) - 1))
74
+ )
75
+ current_word = ""
76
+
77
+ result_to_test = list(split_indices(sequence))
78
+ assert len(comparison) == len(result_to_test)
79
+ assert comparison == result_to_test
80
+
81
+
82
+ def test_word_ngrams_indices():
83
+ sequence = (
84
+ "Hello my name is Bob, I like eating pizza, chicken, chips and ice cream. Maybe I should eat some"
85
+ " more salad but it's so booooring. I just... like eating pizza, chicken, chips and ice cream so much."
86
+ )
87
+
88
+ n_values = [1, 2, 3, 5, 13]
89
+
90
+ for n in n_values:
91
+ ngrams = [" ".join(ngram) for ngram in simple_ngram(sequence.split(), n)]
92
+ tracker = defaultdict(int)
93
+ comparison = []
94
+ for ngram in ngrams:
95
+ while True:
96
+ start = sequence.find(ngram, tracker[ngram])
97
+ assert start != -1 # testing the test
98
+
99
+ end = start + len(ngram) - 1
100
+ tracker[ngram] = end + 1
101
+
102
+ # ignore partial word matches
103
+ if (start != 0 and sequence[start - 1] != " ") or (
104
+ end != len(sequence) - 1 and sequence[end + 1] != " "
105
+ ):
106
+ pass
107
+ else:
108
+ break
109
+
110
+ comparison.append((ngram, (start, end)))
111
+
112
+ result_to_test = list(word_ngrams_indices(sequence, n))
113
+ assert len(result_to_test) == len(comparison)
114
+ assert result_to_test == comparison
115
+
116
+
117
+ # Assumptions from GPT3 Paper:
118
+ # the 200 characters to remove include punctuation, and the window is actually a half-window
119
+
120
+
121
+ # All tests below initially test without any registered contaminants, expecting the same sequence back.
122
+ def test_janitor1():
123
+ # First test using a 1gram and expected the first block before the filth to have some remaining
124
+ # characters, but the second block should be completely removed.
125
+
126
+ sequence = (
127
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
128
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
129
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
130
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
131
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
132
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
133
+ "FILTH. "
134
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
135
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
136
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
137
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
138
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
139
+ )
140
+
141
+ filth = "filth"
142
+
143
+ expected_result = (
144
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
145
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
146
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
147
+ "This is a @line #containing "
148
+ )
149
+
150
+ janitor = Janitor(
151
+ ngram_n=1, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
152
+ )
153
+ result = janitor.clean_python(sequence)
154
+ result = "".join(result)
155
+ assert result == sequence
156
+
157
+ janitor.register_contaminant(filth)
158
+ assert janitor.dirt_ngrams == {filth}
159
+
160
+ result = janitor.clean_python(sequence)
161
+ result = "".join(result)
162
+ assert result == expected_result
163
+
164
+
165
+ def test_janitor2():
166
+ # Second test using a 1gram, expecting the first block before the filth to have some remaining
167
+ # characters, and the second block is longer than 200 characters so should also have some remaining.
168
+
169
+ sequence = (
170
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
171
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
172
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
173
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
174
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
175
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
176
+ "FILTH. "
177
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
178
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
179
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
180
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
181
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
182
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
183
+ )
184
+
185
+ filth = "filth"
186
+
187
+ expected_result = (
188
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
189
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
190
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
191
+ "This is a @line #containing "
192
+ " characters, 76 to be exact. "
193
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
194
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
195
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
196
+ )
197
+
198
+ janitor = Janitor(
199
+ ngram_n=1, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
200
+ )
201
+ result = janitor.clean_python(sequence)
202
+ result = "".join(result)
203
+ assert result == sequence
204
+
205
+ janitor.register_contaminant(filth)
206
+ assert janitor.dirt_ngrams == {filth}
207
+
208
+ result = janitor.clean_python(sequence)
209
+ result = "".join(result)
210
+ assert result == expected_result
211
+
212
+
213
+ def test_janitor3():
214
+ # Same test as above but with a 6gram.
215
+
216
+ sequence = (
217
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
218
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
219
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
220
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
221
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
222
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
223
+ "FILTH. lots of dirty filtHy FIlTh "
224
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
225
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
226
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
227
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
228
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
229
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
230
+ )
231
+
232
+ filth = "filth lots of dirty filthy filth"
233
+
234
+ expected_result = (
235
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
236
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
237
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
238
+ "This is a @line #containing "
239
+ " characters, 76 to be exact. "
240
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
241
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
242
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
243
+ )
244
+
245
+ janitor = Janitor(
246
+ ngram_n=6, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
247
+ )
248
+ result = janitor.clean_python(sequence)
249
+ result = "".join(result)
250
+ assert result == sequence
251
+
252
+ janitor.register_contaminant(filth)
253
+ assert janitor.dirt_ngrams == {filth}
254
+
255
+ result = janitor.clean_python(sequence)
256
+ result = "".join(result)
257
+ assert result == expected_result
258
+
259
+
260
+ def test_janitor4():
261
+ # This test adds another block to that from the previous. The middle block should be entirely
262
+ # removed as the 200 characters are removed from each side.
263
+
264
+ sequence = (
265
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
266
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
267
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
268
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
269
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
270
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
271
+ "FILTH. lots of dirty filtHy FIlTh "
272
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
273
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
274
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
275
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
276
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
277
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
278
+ "FILTH. lots of dirty filtHy FIlTh "
279
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
280
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
281
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
282
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
283
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
284
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
285
+ )
286
+
287
+ filth = "filth lots of dirty filthy filth"
288
+
289
+ expected_result = (
290
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
291
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
292
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
293
+ "This is a @line #containing "
294
+ " characters, 76 to be exact. "
295
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
296
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
297
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
298
+ )
299
+
300
+ janitor = Janitor(
301
+ ngram_n=6, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
302
+ )
303
+ result = janitor.clean_python(sequence)
304
+ result = "".join(result)
305
+ assert result == sequence
306
+
307
+ janitor.register_contaminant(filth)
308
+ assert janitor.dirt_ngrams == {filth}
309
+
310
+ result = janitor.clean_python(sequence)
311
+ result = "".join(result)
312
+ assert result == expected_result
313
+
314
+
315
+ def test_janitor5():
316
+ # Same as above but using multiple different filth 6grams.
317
+
318
+ sequence = (
319
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
320
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
321
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
322
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
323
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
324
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
325
+ "FILTH. lots of dirty filtHy FIlTh "
326
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
327
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
328
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
329
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
330
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
331
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
332
+ "FILTH. lots of filtHy dirty FIlTh "
333
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
334
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
335
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
336
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
337
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
338
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
339
+ )
340
+
341
+ filths = ["filth lots of dirty filthy filth", "filth lots of filthy dirty filth"]
342
+
343
+ expected_result = (
344
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
345
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
346
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
347
+ "This is a @line #containing "
348
+ " characters, 76 to be exact. "
349
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
350
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
351
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
352
+ )
353
+
354
+ janitor = Janitor(
355
+ ngram_n=6, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
356
+ )
357
+ result = janitor.clean_python(sequence)
358
+ result = "".join(result)
359
+ assert result == sequence
360
+
361
+ for filth in filths:
362
+ janitor.register_contaminant(filth)
363
+ assert janitor.dirt_ngrams == set(filths)
364
+
365
+ result = janitor.clean_python(sequence)
366
+ result = "".join(result)
367
+ assert result == expected_result
368
+
369
+
370
+ def test_janitor6():
371
+ # Same as above but now we add 10 filths and expect the same result, the following test does 11.
372
+
373
+ sequence = (
374
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
375
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
376
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
377
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
378
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
379
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
380
+ "FILTH. lots of dirty filtHy FIlTh "
381
+ "FILTH. lots of dirty filtHy FIlTh "
382
+ "FILTH. lots of dirty filtHy FIlTh "
383
+ "FILTH. lots of dirty filtHy FIlTh "
384
+ "FILTH. lots of dirty filtHy FIlTh "
385
+ "FILTH. lots of dirty filtHy FIlTh "
386
+ "FILTH. lots of dirty filtHy FIlTh "
387
+ "FILTH. lots of dirty filtHy FIlTh "
388
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
389
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
390
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
391
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
392
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
393
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
394
+ "FILTH. lots of filtHy dirty FIlTh "
395
+ "FILTH. lots of filtHy dirty FIlTh "
396
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
397
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
398
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
399
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
400
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
401
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
402
+ )
403
+
404
+ filths = ["filth lots of dirty filthy filth", "filth lots of filthy dirty filth"]
405
+
406
+ expected_result = (
407
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
408
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
409
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
410
+ "This is a @line #containing "
411
+ " characters, 76 to be exact. "
412
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
413
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
414
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
415
+ )
416
+
417
+ janitor = Janitor(
418
+ ngram_n=6, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
419
+ )
420
+ result = janitor.clean_python(sequence)
421
+ result = "".join(result)
422
+ assert result == sequence
423
+
424
+ for filth in filths:
425
+ janitor.register_contaminant(filth)
426
+ assert janitor.dirt_ngrams == set(filths)
427
+
428
+ result = janitor.clean_python(sequence)
429
+ result = "".join(result)
430
+ assert result == expected_result
431
+
432
+
433
+ def test_janitor7():
434
+ # Same as above but now with 11 filth occurrences, which exceeds too_dirty_cutoff, so the whole sequence is removed.
435
+
436
+ sequence = (
437
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
438
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
439
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
440
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
441
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
442
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
443
+ "FILTH. lots of dirty filtHy FIlTh "
444
+ "FILTH. lots of dirty filtHy FIlTh "
445
+ "FILTH. lots of dirty filtHy FIlTh "
446
+ "FILTH. lots of dirty filtHy FIlTh "
447
+ "FILTH. lots of dirty filtHy FIlTh "
448
+ "FILTH. lots of dirty filtHy FIlTh "
449
+ "FILTH. lots of dirty filtHy FIlTh "
450
+ "FILTH. lots of dirty filtHy FIlTh "
451
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
452
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
453
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
454
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
455
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
456
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
457
+ "FILTH. lots of filtHy dirty FIlTh "
458
+ "FILTH. lots of filtHy dirty FIlTh "
459
+ "FILTH. lots of filtHy dirty FIlTh "
460
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
461
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
462
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
463
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
464
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
465
+ "This is a @line #containing a certain number of characters, 76 to be exact. "
466
+ )
467
+
468
+ filths = ["filth lots of dirty filthy filth", "filth lots of filthy dirty filth"]
469
+
470
+ expected_result = ""
471
+
472
+ janitor = Janitor(
473
+ ngram_n=6, window_to_remove=200, too_dirty_cutoff=10, minimum_slice_length=200
474
+ )
475
+ result = janitor.clean_python(sequence)
476
+ result = "".join(result)
477
+ assert result == sequence
478
+
479
+ for filth in filths:
480
+ janitor.register_contaminant(filth)
481
+ assert janitor.dirt_ngrams == set(filths)
482
+
483
+ result = janitor.clean_python(sequence)
484
+ result = "".join(result)
485
+ assert result == expected_result
486
+
487
+
488
+ def test_janitor8():
489
+ # This will test the save and load contams
490
+ pass
491
+ # source = """ ,, I'm a very !dirty,, ,, dirty boy. Clean me daddy. \n\nhe he he hehe heh. lastword """ * 2
492
+ # contaminant = "dirty boy. Clean he he"
493
+
494
+ # jan = Janitor(ngram_n=3)
495
+ # jan.register_contaminant(contaminant)
496
+ # cleaned = " ".join(jan.clean(source))
497
+ # for contam in jan.dirt_ngrams:
498
+ # assert contam not in cleaned, contam
499
+
500
+ # filename = "data/saved_contam"
501
+ # jan.save_contamination_ngrams(filename)
502
+
503
+ # jan = Janitor(ngram_n=3)
504
+ # jan.load_contamination_ngrams(filename)
505
+ # cleaned = " ".join(jan.clean(source))
506
+ # for contam in jan.dirt_ngrams:
507
+ # assert contam not in cleaned, contam
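Taken together, the janitor tests pin down the cleaning behaviour: registered contaminant n-grams are located in the text, a 200-character half-window is removed on each side of every hit, slices shorter than minimum_slice_length are dropped, and a document with more hits than too_dirty_cutoff is removed entirely. A deliberately simplified sketch of that behaviour (the real Janitor normalises case and punctuation and matches on word n-grams, which this toy version glosses over):

def clean_sketch(text, contaminants, window=200, too_dirty_cutoff=10, min_slice=200):
    # Locate every (case-insensitive) contaminant occurrence.
    hits = []
    for filth in contaminants:
        start = 0
        while (idx := text.lower().find(filth.lower(), start)) != -1:
            hits.append((idx, idx + len(filth)))
            start = idx + 1
    if len(hits) > too_dirty_cutoff:
        return ""  # too dirty: drop the whole document
    # Cut a half-window around each hit and keep only slices that stay long enough.
    keep, cursor = [], 0
    for s, e in sorted(hits):
        piece = text[cursor : max(cursor, s - window)]
        if len(piece) >= min_slice:
            keep.append(piece)
        cursor = max(cursor, e + window)
    tail = text[cursor:]
    if len(tail) >= min_slice:
        keep.append(tail)
    return "".join(keep)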