[build-system]
requires = ["setuptools>=40.8.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "lm_eval"
version = "0.4.2"
authors = [
    {name="EleutherAI", email="contact@eleuther.ai"}
]
description = "A framework for evaluating language models"
readme = "README.md"
classifiers = [
    "Development Status :: 3 - Alpha",
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
]
requires-python = ">=3.8"
license = { "text" = "MIT" }
dependencies = [
    "accelerate>=0.21.0",
    "datasets>=2.16.0",
    "evaluate>=0.4.0",
    "jsonlines",
    "numexpr",
    "peft>=0.2.0",
    "pybind11>=2.6.2",
    "pytablewriter",
    "rouge-score>=0.0.4",
    "sacrebleu>=1.5.0",
    "scikit-learn>=0.24.1",
    "sqlitedict",
    "torch>=1.8",
    "tqdm-multiprocess",
    "transformers>=4.1",
    "zstandard",
    "dill",
    "word2number",
    "more_itertools",
]

[tool.setuptools.packages.find]
include = ["lm_eval*"]

# required to include yaml files in pip installation
[tool.setuptools.package-data]
lm_eval = ["**/*.yaml", "tasks/**/*"]

[project.scripts]
lm-eval = "lm_eval.__main__:cli_evaluate"
lm_eval = "lm_eval.__main__:cli_evaluate"

[project.urls]
Homepage = "https://github.com/EleutherAI/lm-evaluation-harness"
Repository = "https://github.com/EleutherAI/lm-evaluation-harness"

[project.optional-dependencies]
anthropic = ["anthropic"]
dev = ["pytest", "pytest-cov", "pytest-xdist", "pre-commit", "mypy"]
gptq = ["auto-gptq[triton]>=0.6.0"]
hf_transfer = ["hf_transfer"]
ifeval = ["langdetect", "immutabledict"]
neuronx = ["optimum[neuronx]"]
mamba = ["mamba_ssm", "causal-conv1d==1.0.2"]
math = ["sympy>=1.12", "antlr4-python3-runtime==4.11"]
multilingual = ["nagisa>=0.2.7", "jieba>=0.42.1", "pycountry"]
openai = ["openai==1.3.9", "tiktoken"]
optimum = ["optimum[openvino]"]
promptsource = ["promptsource>=0.2.3"]
sentencepiece = ["sentencepiece>=0.1.98", "protobuf>=4.22.1"]
testing = ["pytest", "pytest-cov", "pytest-xdist"]
vllm = ["vllm==0.3.2"]
zeno = ["pandas", "zeno-client"]
wandb = ["wandb>=0.16.3", "pandas", "numpy"]
all = [
    "lm_eval[anthropic]",
    "lm_eval[dev]",
    "lm_eval[gptq]",
    "lm_eval[hf_transfer]",
    "lm_eval[ifeval]",
    "lm_eval[mamba]",
    "lm_eval[math]",
    "lm_eval[multilingual]",
    "lm_eval[openai]",
    "lm_eval[promptsource]",
    "lm_eval[sentencepiece]",
    "lm_eval[testing]",
    "lm_eval[vllm]",
    "lm_eval[zeno]",
    "lm_eval[wandb]",
]

[tool.ruff.lint]
extend-select = ["I"]

[tool.ruff.lint.isort]
lines-after-imports = 2
known-first-party = ["lm_eval"]

[tool.ruff.lint.extend-per-file-ignores]
"__init__.py" = ["F401", "F402", "F403"]
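
# Example usage of the extras and console scripts defined above (a sketch;
# the model and task names below are illustrative, not prescribed by this file):
#
#   pip install lm_eval             # core dependencies only
#   pip install "lm_eval[vllm]"     # add the vLLM backend extra
#   pip install "lm_eval[all]"      # every extra except the hardware-specific
#                                   # neuronx and optimum extras
#
# Both [project.scripts] entries map to the same entry point, so the hyphen
# and underscore invocations are equivalent:
#
#   lm-eval --model hf --model_args pretrained=gpt2 --tasks hellaswag
#   lm_eval --model hf --model_args pretrained=gpt2 --tasks hellaswag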