diff --git a/llmeval-env/bin/Activate.ps1 b/llmeval-env/bin/Activate.ps1 new file mode 100644 index 0000000000000000000000000000000000000000..b49d77ba44b24fe6d69f6bbe75139b3b5dc23075 --- /dev/null +++ b/llmeval-env/bin/Activate.ps1 @@ -0,0 +1,247 @@ +<# +.Synopsis +Activate a Python virtual environment for the current PowerShell session. + +.Description +Pushes the python executable for a virtual environment to the front of the +$Env:PATH environment variable and sets the prompt to signify that you are +in a Python virtual environment. Makes use of the command line switches as +well as the `pyvenv.cfg` file values present in the virtual environment. + +.Parameter VenvDir +Path to the directory that contains the virtual environment to activate. The +default value for this is the parent of the directory that the Activate.ps1 +script is located within. + +.Parameter Prompt +The prompt prefix to display when this virtual environment is activated. By +default, this prompt is the name of the virtual environment folder (VenvDir) +surrounded by parentheses and followed by a single space (ie. '(.venv) '). + +.Example +Activate.ps1 +Activates the Python virtual environment that contains the Activate.ps1 script. + +.Example +Activate.ps1 -Verbose +Activates the Python virtual environment that contains the Activate.ps1 script, +and shows extra information about the activation as it executes. + +.Example +Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv +Activates the Python virtual environment located in the specified location. + +.Example +Activate.ps1 -Prompt "MyPython" +Activates the Python virtual environment that contains the Activate.ps1 script, +and prefixes the current prompt with the specified string (surrounded in +parentheses) while the virtual environment is active. + +.Notes +On Windows, it may be required to enable this Activate.ps1 script by setting the +execution policy for the user. 
You can do this by issuing the following PowerShell +command: + +PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser + +For more information on Execution Policies: +https://go.microsoft.com/fwlink/?LinkID=135170 + +#> +Param( + [Parameter(Mandatory = $false)] + [String] + $VenvDir, + [Parameter(Mandatory = $false)] + [String] + $Prompt +) + +<# Function declarations --------------------------------------------------- #> + +<# +.Synopsis +Remove all shell session elements added by the Activate script, including the +addition of the virtual environment's Python executable from the beginning of +the PATH variable. + +.Parameter NonDestructive +If present, do not remove this function from the global namespace for the +session. + +#> +function global:deactivate ([switch]$NonDestructive) { + # Revert to original values + + # The prior prompt: + if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) { + Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt + Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT + } + + # The prior PYTHONHOME: + if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) { + Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME + Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME + } + + # The prior PATH: + if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) { + Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH + Remove-Item -Path Env:_OLD_VIRTUAL_PATH + } + + # Just remove the VIRTUAL_ENV altogether: + if (Test-Path -Path Env:VIRTUAL_ENV) { + Remove-Item -Path env:VIRTUAL_ENV + } + + # Just remove VIRTUAL_ENV_PROMPT altogether. 
+ if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) { + Remove-Item -Path env:VIRTUAL_ENV_PROMPT + } + + # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether: + if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) { + Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force + } + + # Leave deactivate function in the global namespace if requested: + if (-not $NonDestructive) { + Remove-Item -Path function:deactivate + } +} + +<# +.Description +Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the +given folder, and returns them in a map. + +For each line in the pyvenv.cfg file, if that line can be parsed into exactly +two strings separated by `=` (with any amount of whitespace surrounding the =) +then it is considered a `key = value` line. The left hand string is the key, +the right hand is the value. + +If the value starts with a `'` or a `"` then the first and last character is +stripped from the value before being captured. + +.Parameter ConfigDir +Path to the directory that contains the `pyvenv.cfg` file. +#> +function Get-PyVenvConfig( + [String] + $ConfigDir +) { + Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg" + + # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue). + $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue + + # An empty map will be returned if no config file is found. + $pyvenvConfig = @{ } + + if ($pyvenvConfigPath) { + + Write-Verbose "File exists, parse `key = value` lines" + $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath + + $pyvenvConfigContent | ForEach-Object { + $keyval = $PSItem -split "\s*=\s*", 2 + if ($keyval[0] -and $keyval[1]) { + $val = $keyval[1] + + # Remove extraneous quotations around a string value. 
+ if ("'""".Contains($val.Substring(0, 1))) { + $val = $val.Substring(1, $val.Length - 2) + } + + $pyvenvConfig[$keyval[0]] = $val + Write-Verbose "Adding Key: '$($keyval[0])'='$val'" + } + } + } + return $pyvenvConfig +} + + +<# Begin Activate script --------------------------------------------------- #> + +# Determine the containing directory of this script +$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition +$VenvExecDir = Get-Item -Path $VenvExecPath + +Write-Verbose "Activation script is located in path: '$VenvExecPath'" +Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)" +Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)" + +# Set values required in priority: CmdLine, ConfigFile, Default +# First, get the location of the virtual environment, it might not be +# VenvExecDir if specified on the command line. +if ($VenvDir) { + Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values" +} +else { + Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir." + $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/") + Write-Verbose "VenvDir=$VenvDir" +} + +# Next, read the `pyvenv.cfg` file to determine any required value such +# as `prompt`. +$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir + +# Next, set the prompt from the command line, or the config file, or +# just use the name of the virtual environment folder. +if ($Prompt) { + Write-Verbose "Prompt specified as argument, using '$Prompt'" +} +else { + Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value" + if ($pyvenvCfg -and $pyvenvCfg['prompt']) { + Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'" + $Prompt = $pyvenvCfg['prompt']; + } + else { + Write-Verbose " Setting prompt based on parent's directory's name. 
(Is the directory name passed to venv module when creating the virtual environment)" + Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'" + $Prompt = Split-Path -Path $venvDir -Leaf + } +} + +Write-Verbose "Prompt = '$Prompt'" +Write-Verbose "VenvDir='$VenvDir'" + +# Deactivate any currently active virtual environment, but leave the +# deactivate function in place. +deactivate -nondestructive + +# Now set the environment variable VIRTUAL_ENV, used by many tools to determine +# that there is an activated venv. +$env:VIRTUAL_ENV = $VenvDir + +if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) { + + Write-Verbose "Setting prompt to '$Prompt'" + + # Set the prompt to include the env name + # Make sure _OLD_VIRTUAL_PROMPT is global + function global:_OLD_VIRTUAL_PROMPT { "" } + Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT + New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt + + function global:prompt { + Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) " + _OLD_VIRTUAL_PROMPT + } + $env:VIRTUAL_ENV_PROMPT = $Prompt +} + +# Clear PYTHONHOME +if (Test-Path -Path Env:PYTHONHOME) { + Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME + Remove-Item -Path Env:PYTHONHOME +} + +# Add the venv to the PATH +Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH +$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH" diff --git a/llmeval-env/bin/accelerate b/llmeval-env/bin/accelerate new file mode 100644 index 0000000000000000000000000000000000000000..069bc54ee5b124dbdecb96a67f7248b85adf29e1 --- /dev/null +++ b/llmeval-env/bin/accelerate @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from accelerate.commands.accelerate_cli import main +if __name__ == '__main__': + sys.argv[0] = 
re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/llmeval-env/bin/accelerate-config b/llmeval-env/bin/accelerate-config new file mode 100644 index 0000000000000000000000000000000000000000..fc53ed4f0efcf069738170ee649e1f4565b95ab6 --- /dev/null +++ b/llmeval-env/bin/accelerate-config @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from accelerate.commands.config import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/llmeval-env/bin/accelerate-estimate-memory b/llmeval-env/bin/accelerate-estimate-memory new file mode 100644 index 0000000000000000000000000000000000000000..be4b0f1e326ff532e90fbc253240c2a5c69011bd --- /dev/null +++ b/llmeval-env/bin/accelerate-estimate-memory @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from accelerate.commands.estimate import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/llmeval-env/bin/accelerate-launch b/llmeval-env/bin/accelerate-launch new file mode 100644 index 0000000000000000000000000000000000000000..c880e0ea5b461b4614194535f391c88928d36075 --- /dev/null +++ b/llmeval-env/bin/accelerate-launch @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from accelerate.commands.launch import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/llmeval-env/bin/activate b/llmeval-env/bin/activate new file mode 100644 index 0000000000000000000000000000000000000000..ea3cecb76ef3a900a4eb4703d30cc5e47be344a8 --- /dev/null +++ b/llmeval-env/bin/activate @@ -0,0 +1,69 @@ +# This file must be used with "source bin/activate" *from bash* +# you cannot run it 
directly + +deactivate () { + # reset old environment variables + if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then + PATH="${_OLD_VIRTUAL_PATH:-}" + export PATH + unset _OLD_VIRTUAL_PATH + fi + if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then + PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}" + export PYTHONHOME + unset _OLD_VIRTUAL_PYTHONHOME + fi + + # This should detect bash and zsh, which have a hash command that must + # be called to get it to forget past commands. Without forgetting + # past commands the $PATH changes we made may not be respected + if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then + hash -r 2> /dev/null + fi + + if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then + PS1="${_OLD_VIRTUAL_PS1:-}" + export PS1 + unset _OLD_VIRTUAL_PS1 + fi + + unset VIRTUAL_ENV + unset VIRTUAL_ENV_PROMPT + if [ ! "${1:-}" = "nondestructive" ] ; then + # Self destruct! + unset -f deactivate + fi +} + +# unset irrelevant variables +deactivate nondestructive + +VIRTUAL_ENV="/mnt/weka/peacock/llm_eval/llmeval-env" +export VIRTUAL_ENV + +_OLD_VIRTUAL_PATH="$PATH" +PATH="$VIRTUAL_ENV/bin:$PATH" +export PATH + +# unset PYTHONHOME if set +# this will fail if PYTHONHOME is set to the empty string (which is bad anyway) +# could use `if (set -u; : $PYTHONHOME) ;` in bash +if [ -n "${PYTHONHOME:-}" ] ; then + _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}" + unset PYTHONHOME +fi + +if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then + _OLD_VIRTUAL_PS1="${PS1:-}" + PS1="(llmeval-env) ${PS1:-}" + export PS1 + VIRTUAL_ENV_PROMPT="(llmeval-env) " + export VIRTUAL_ENV_PROMPT +fi + +# This should detect bash and zsh, which have a hash command that must +# be called to get it to forget past commands. 
Without forgetting +# past commands the $PATH changes we made may not be respected +if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then + hash -r 2> /dev/null +fi diff --git a/llmeval-env/bin/activate.csh b/llmeval-env/bin/activate.csh new file mode 100644 index 0000000000000000000000000000000000000000..afab03e28c86fd7597927e0fc4fc03d1551910b7 --- /dev/null +++ b/llmeval-env/bin/activate.csh @@ -0,0 +1,26 @@ +# This file must be used with "source bin/activate.csh" *from csh*. +# You cannot run it directly. +# Created by Davide Di Blasi . +# Ported to Python 3.3 venv by Andrew Svetlov + +alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate' + +# Unset irrelevant variables. +deactivate nondestructive + +setenv VIRTUAL_ENV "/mnt/weka/peacock/llm_eval/llmeval-env" + +set _OLD_VIRTUAL_PATH="$PATH" +setenv PATH "$VIRTUAL_ENV/bin:$PATH" + + +set _OLD_VIRTUAL_PROMPT="$prompt" + +if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then + set prompt = "(llmeval-env) $prompt" + setenv VIRTUAL_ENV_PROMPT "(llmeval-env) " +endif + +alias pydoc python -m pydoc + +rehash diff --git a/llmeval-env/bin/activate.fish b/llmeval-env/bin/activate.fish new file mode 100644 index 0000000000000000000000000000000000000000..4ffee2640e972b53c610062322ff4498320f91cb --- /dev/null +++ b/llmeval-env/bin/activate.fish @@ -0,0 +1,69 @@ +# This file must be used with "source /bin/activate.fish" *from fish* +# (https://fishshell.com/); you cannot run it directly. 
+ +function deactivate -d "Exit virtual environment and return to normal shell environment" + # reset old environment variables + if test -n "$_OLD_VIRTUAL_PATH" + set -gx PATH $_OLD_VIRTUAL_PATH + set -e _OLD_VIRTUAL_PATH + end + if test -n "$_OLD_VIRTUAL_PYTHONHOME" + set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME + set -e _OLD_VIRTUAL_PYTHONHOME + end + + if test -n "$_OLD_FISH_PROMPT_OVERRIDE" + set -e _OLD_FISH_PROMPT_OVERRIDE + # prevents error when using nested fish instances (Issue #93858) + if functions -q _old_fish_prompt + functions -e fish_prompt + functions -c _old_fish_prompt fish_prompt + functions -e _old_fish_prompt + end + end + + set -e VIRTUAL_ENV + set -e VIRTUAL_ENV_PROMPT + if test "$argv[1]" != "nondestructive" + # Self-destruct! + functions -e deactivate + end +end + +# Unset irrelevant variables. +deactivate nondestructive + +set -gx VIRTUAL_ENV "/mnt/weka/peacock/llm_eval/llmeval-env" + +set -gx _OLD_VIRTUAL_PATH $PATH +set -gx PATH "$VIRTUAL_ENV/bin" $PATH + +# Unset PYTHONHOME if set. +if set -q PYTHONHOME + set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME + set -e PYTHONHOME +end + +if test -z "$VIRTUAL_ENV_DISABLE_PROMPT" + # fish uses a function instead of an env var to generate the prompt. + + # Save the current fish_prompt function as the function _old_fish_prompt. + functions -c fish_prompt _old_fish_prompt + + # With the original prompt function renamed, we can override with our own. + function fish_prompt + # Save the return status of the last command. + set -l old_status $status + + # Output the venv prompt; color taken from the blue of the Python logo. + printf "%s%s%s" (set_color 4B8BBE) "(llmeval-env) " (set_color normal) + + # Restore the return status of the previous command. + echo "exit $old_status" | . + # Output the original/"old" prompt. 
+ _old_fish_prompt + end + + set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV" + set -gx VIRTUAL_ENV_PROMPT "(llmeval-env) " +end diff --git a/llmeval-env/bin/chardetect b/llmeval-env/bin/chardetect new file mode 100644 index 0000000000000000000000000000000000000000..98b50c4b329a45893c0945c2ae5c986dfb80868e --- /dev/null +++ b/llmeval-env/bin/chardetect @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from chardet.cli.chardetect import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/llmeval-env/bin/convert-caffe2-to-onnx b/llmeval-env/bin/convert-caffe2-to-onnx new file mode 100644 index 0000000000000000000000000000000000000000..49cfdf0f992ee8a63646be1065bc5ebb0d773dff --- /dev/null +++ b/llmeval-env/bin/convert-caffe2-to-onnx @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from caffe2.python.onnx.bin.conversion import caffe2_to_onnx +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(caffe2_to_onnx()) diff --git a/llmeval-env/bin/convert-onnx-to-caffe2 b/llmeval-env/bin/convert-onnx-to-caffe2 new file mode 100644 index 0000000000000000000000000000000000000000..ef36d9438f56b678b6c45f665a60d78a5080ab73 --- /dev/null +++ b/llmeval-env/bin/convert-onnx-to-caffe2 @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from caffe2.python.onnx.bin.conversion import onnx_to_caffe2 +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(onnx_to_caffe2()) diff --git a/llmeval-env/bin/datasets-cli b/llmeval-env/bin/datasets-cli new file mode 100644 index 0000000000000000000000000000000000000000..7e0831491c6ccc663bafb804c779e18d093c537f --- /dev/null +++ b/llmeval-env/bin/datasets-cli @@ -0,0 
+1,8 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from datasets.commands.datasets_cli import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/llmeval-env/bin/evaluate-cli b/llmeval-env/bin/evaluate-cli new file mode 100644 index 0000000000000000000000000000000000000000..bfa6b0d56734573a34500028fcb3c218eb51dcac --- /dev/null +++ b/llmeval-env/bin/evaluate-cli @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from evaluate.commands.evaluate_cli import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/llmeval-env/bin/f2py b/llmeval-env/bin/f2py new file mode 100644 index 0000000000000000000000000000000000000000..df4ab644f86af10ec0e69377e5c9d5ecba313503 --- /dev/null +++ b/llmeval-env/bin/f2py @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from numpy.f2py.f2py2e import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/llmeval-env/bin/get_gprof b/llmeval-env/bin/get_gprof new file mode 100644 index 0000000000000000000000000000000000000000..41ad9868cef62efae009823a075e25725a5b023a --- /dev/null +++ b/llmeval-env/bin/get_gprof @@ -0,0 +1,75 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. 
The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +''' +build profile graph for the given instance + +running: + $ get_gprof + +executes: + gprof2dot -f pstats .prof | dot -Tpng -o .call.png + +where: + are arguments for gprof2dot, such as "-n 5 -e 5" + is code to create the instance to profile + is the class of the instance (i.e. type(instance)) + +For example: + $ get_gprof -n 5 -e 1 "import numpy; numpy.array([1,2])" + +will create 'ndarray.call.png' with the profile graph for numpy.array([1,2]), +where '-n 5' eliminates nodes below 5% threshold, similarly '-e 1' eliminates +edges below 1% threshold +''' + +if __name__ == "__main__": + import sys + if len(sys.argv) < 2: + print ("Please provide an object instance (e.g. 'import math; math.pi')") + sys.exit() + # grab args for gprof2dot + args = sys.argv[1:-1] + args = ' '.join(args) + # last arg builds the object + obj = sys.argv[-1] + obj = obj.split(';') + # multi-line prep for generating an instance + for line in obj[:-1]: + exec(line) + # one-line generation of an instance + try: + obj = eval(obj[-1]) + except Exception: + print ("Error processing object instance") + sys.exit() + + # get object 'name' + objtype = type(obj) + name = getattr(objtype, '__name__', getattr(objtype, '__class__', objtype)) + + # profile dumping an object + import dill + import os + import cProfile + #name = os.path.splitext(os.path.basename(__file__))[0] + cProfile.run("dill.dumps(obj)", filename="%s.prof" % name) + msg = "gprof2dot -f pstats %s %s.prof | dot -Tpng -o %s.call.png" % (args, name, name) + try: + res = os.system(msg) + except Exception: + print ("Please verify install of 'gprof2dot' to view profile graphs") + if res: + print ("Please verify install of 'gprof2dot' to view profile graphs") + + # get stats + f_prof = "%s.prof" % name + import pstats + stats = pstats.Stats(f_prof, stream=sys.stdout) + stats.strip_dirs().sort_stats('cumtime') + stats.print_stats(20) #XXX: 
save to file instead of print top 20? + os.remove(f_prof) diff --git a/llmeval-env/bin/get_objgraph b/llmeval-env/bin/get_objgraph new file mode 100644 index 0000000000000000000000000000000000000000..3d9f8bc52a7fe11be5de5f3567abe38e26293af4 --- /dev/null +++ b/llmeval-env/bin/get_objgraph @@ -0,0 +1,54 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +display the reference paths for objects in ``dill.types`` or a .pkl file + +Notes: + the generated image is useful in showing the pointer references in + objects that are or can be pickled. Any object in ``dill.objects`` + listed in ``dill.load_types(picklable=True, unpicklable=True)`` works. + +Examples:: + + $ get_objgraph ArrayType + Image generated as ArrayType.png +""" + +import dill as pickle +#pickle.debug.trace(True) +#import pickle + +# get all objects for testing +from dill import load_types +load_types(pickleable=True,unpickleable=True) +from dill import objects + +if __name__ == "__main__": + import sys + if len(sys.argv) != 2: + print ("Please provide exactly one file or type name (e.g. 
'IntType')") + msg = "\n" + for objtype in list(objects.keys())[:40]: + msg += objtype + ', ' + print (msg + "...") + else: + objtype = str(sys.argv[-1]) + try: + obj = objects[objtype] + except KeyError: + obj = pickle.load(open(objtype,'rb')) + import os + objtype = os.path.splitext(objtype)[0] + try: + import objgraph + objgraph.show_refs(obj, filename=objtype+'.png') + except ImportError: + print ("Please install 'objgraph' to view object graphs") + + +# EOF diff --git a/llmeval-env/bin/huggingface-cli b/llmeval-env/bin/huggingface-cli new file mode 100644 index 0000000000000000000000000000000000000000..135b36761d3b8f72275e52b7465722c166cd2f77 --- /dev/null +++ b/llmeval-env/bin/huggingface-cli @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from huggingface_hub.commands.huggingface_cli import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/llmeval-env/bin/isympy b/llmeval-env/bin/isympy new file mode 100644 index 0000000000000000000000000000000000000000..9ed1989443059ec16c0f27d2b07b61c0661d2615 --- /dev/null +++ b/llmeval-env/bin/isympy @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from isympy import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/llmeval-env/bin/lm-eval b/llmeval-env/bin/lm-eval new file mode 100644 index 0000000000000000000000000000000000000000..e3b9c6a12779f40be674c53dc0b2ce0c4ea0052c --- /dev/null +++ b/llmeval-env/bin/lm-eval @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from lm_eval.__main__ import cli_evaluate +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(cli_evaluate()) diff --git 
a/llmeval-env/bin/lm_eval b/llmeval-env/bin/lm_eval new file mode 100644 index 0000000000000000000000000000000000000000..e3b9c6a12779f40be674c53dc0b2ce0c4ea0052c --- /dev/null +++ b/llmeval-env/bin/lm_eval @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from lm_eval.__main__ import cli_evaluate +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(cli_evaluate()) diff --git a/llmeval-env/bin/nltk b/llmeval-env/bin/nltk new file mode 100644 index 0000000000000000000000000000000000000000..4edc4c6d4f2f2221dbc28313e4cd5906d3d41741 --- /dev/null +++ b/llmeval-env/bin/nltk @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from nltk.cli import cli +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(cli()) diff --git a/llmeval-env/bin/normalizer b/llmeval-env/bin/normalizer new file mode 100644 index 0000000000000000000000000000000000000000..2089e7ad3f6560b8973b7e0958f29294c035ab30 --- /dev/null +++ b/llmeval-env/bin/normalizer @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from charset_normalizer.cli import cli_detect +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(cli_detect()) diff --git a/llmeval-env/bin/pip b/llmeval-env/bin/pip new file mode 100644 index 0000000000000000000000000000000000000000..cee0bdf8b927e5d5d6e5c305e1ff05d041f489c4 --- /dev/null +++ b/llmeval-env/bin/pip @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from pip._internal.cli.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/llmeval-env/bin/pip3 b/llmeval-env/bin/pip3 
new file mode 100644 index 0000000000000000000000000000000000000000..cee0bdf8b927e5d5d6e5c305e1ff05d041f489c4 --- /dev/null +++ b/llmeval-env/bin/pip3 @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from pip._internal.cli.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/llmeval-env/bin/pip3.10 b/llmeval-env/bin/pip3.10 new file mode 100644 index 0000000000000000000000000000000000000000..cee0bdf8b927e5d5d6e5c305e1ff05d041f489c4 --- /dev/null +++ b/llmeval-env/bin/pip3.10 @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from pip._internal.cli.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/llmeval-env/bin/pybind11-config b/llmeval-env/bin/pybind11-config new file mode 100644 index 0000000000000000000000000000000000000000..95b666bcfbe032316115919e87b440caf7a903fc --- /dev/null +++ b/llmeval-env/bin/pybind11-config @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from pybind11.__main__ import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/llmeval-env/bin/sacrebleu b/llmeval-env/bin/sacrebleu new file mode 100644 index 0000000000000000000000000000000000000000..44f8d005df4412449fb5159b7fc4d389acdd3fa6 --- /dev/null +++ b/llmeval-env/bin/sacrebleu @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from sacrebleu.sacrebleu import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/llmeval-env/bin/tabulate b/llmeval-env/bin/tabulate new file mode 
100644 index 0000000000000000000000000000000000000000..56bdc38db380b8430095c55cfe6a3680d21c2ad4 --- /dev/null +++ b/llmeval-env/bin/tabulate @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from tabulate import _main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(_main()) diff --git a/llmeval-env/bin/torchrun b/llmeval-env/bin/torchrun new file mode 100644 index 0000000000000000000000000000000000000000..cdb78a66379142312467e24dce094b0be234eddc --- /dev/null +++ b/llmeval-env/bin/torchrun @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from torch.distributed.run import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/llmeval-env/bin/tqdm b/llmeval-env/bin/tqdm new file mode 100644 index 0000000000000000000000000000000000000000..3af282b101f3c5de602877eb77b9697fb4facfb0 --- /dev/null +++ b/llmeval-env/bin/tqdm @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from tqdm.cli import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/llmeval-env/bin/transformers-cli b/llmeval-env/bin/transformers-cli new file mode 100644 index 0000000000000000000000000000000000000000..7b00cc0322b7709dafb2a254643b12e633d31cc3 --- /dev/null +++ b/llmeval-env/bin/transformers-cli @@ -0,0 +1,8 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from transformers.commands.transformers_cli import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/llmeval-env/bin/undill b/llmeval-env/bin/undill new file mode 100644 index 
0000000000000000000000000000000000000000..fa969d8dccd83fab2be7b7084fe1e316ba6f91d8 --- /dev/null +++ b/llmeval-env/bin/undill @@ -0,0 +1,22 @@ +#!/mnt/weka/peacock/llm_eval/llmeval-env/bin/python3 +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +unpickle the contents of a pickled object file + +Examples:: + + $ undill hello.pkl + ['hello', 'world'] +""" + +if __name__ == '__main__': + import sys + import dill + for file in sys.argv[1:]: + print (dill.load(open(file,'rb'))) + diff --git a/llmeval-env/lib/python3.10/site-packages/dateutil/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/dateutil/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a6e55839ef1d2b76475ac9410a94ff8579329df Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/dateutil/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/dateutil/__pycache__/_common.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/dateutil/__pycache__/_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed5deab0ea8a60c01b65e264ae794efbd6e2b64a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/dateutil/__pycache__/_common.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/dateutil/__pycache__/_version.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/dateutil/__pycache__/_version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e91b1e5f2fa555f47e0a702d929964cad62ddd4f Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/dateutil/__pycache__/_version.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/dateutil/__pycache__/easter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/dateutil/__pycache__/easter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22ae8b532162950195a2a5e645c1044bc2c4379a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/dateutil/__pycache__/easter.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/dateutil/__pycache__/relativedelta.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/dateutil/__pycache__/relativedelta.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e97b3f284d24c81d3faba4c5011331911287cd72 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/dateutil/__pycache__/relativedelta.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/dateutil/__pycache__/rrule.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/dateutil/__pycache__/rrule.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab565e268330da75051d65ad89a487709286bcce Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/dateutil/__pycache__/rrule.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/dateutil/__pycache__/tzwin.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/dateutil/__pycache__/tzwin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe6ccbcbe1ac24018328629a5e29dc917b823eb1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/dateutil/__pycache__/tzwin.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/dateutil/__pycache__/utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/dateutil/__pycache__/utils.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..fbab2e925009437bd821546a0a05044f748a49f6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/dateutil/__pycache__/utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/dateutil/_version.py b/llmeval-env/lib/python3.10/site-packages/dateutil/_version.py new file mode 100644 index 0000000000000000000000000000000000000000..ddda98098527a73348e694c2edb691fd625475fc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/dateutil/_version.py @@ -0,0 +1,4 @@ +# file generated by setuptools_scm +# don't change, don't track in version control +__version__ = version = '2.9.0.post0' +__version_tuple__ = version_tuple = (2, 9, 0) diff --git a/llmeval-env/lib/python3.10/site-packages/dateutil/rrule.py b/llmeval-env/lib/python3.10/site-packages/dateutil/rrule.py new file mode 100644 index 0000000000000000000000000000000000000000..571a0d2bc886a7ea4c06196b2f52e740c2ed6e9f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/dateutil/rrule.py @@ -0,0 +1,1737 @@ +# -*- coding: utf-8 -*- +""" +The rrule module offers a small, complete, and very fast, implementation of +the recurrence rules documented in the +`iCalendar RFC `_, +including support for caching of results. +""" +import calendar +import datetime +import heapq +import itertools +import re +import sys +from functools import wraps +# For warning about deprecation of until and count +from warnings import warn + +from six import advance_iterator, integer_types + +from six.moves import _thread, range + +from ._common import weekday as weekdaybase + +try: + from math import gcd +except ImportError: + from fractions import gcd + +__all__ = ["rrule", "rruleset", "rrulestr", + "YEARLY", "MONTHLY", "WEEKLY", "DAILY", + "HOURLY", "MINUTELY", "SECONDLY", + "MO", "TU", "WE", "TH", "FR", "SA", "SU"] + +# Every mask is 7 days longer to handle cross-year weekly periods. 
+M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 + + [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7) +M365MASK = list(M366MASK) +M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32)) +MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) +MDAY365MASK = list(MDAY366MASK) +M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0)) +NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) +NMDAY365MASK = list(NMDAY366MASK) +M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366) +M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365) +WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55 +del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31] +MDAY365MASK = tuple(MDAY365MASK) +M365MASK = tuple(M365MASK) + +FREQNAMES = ['YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTELY', 'SECONDLY'] + +(YEARLY, + MONTHLY, + WEEKLY, + DAILY, + HOURLY, + MINUTELY, + SECONDLY) = list(range(7)) + +# Imported on demand. +easter = None +parser = None + + +class weekday(weekdaybase): + """ + This version of weekday does not allow n = 0. + """ + def __init__(self, wkday, n=None): + if n == 0: + raise ValueError("Can't create weekday with n==0") + + super(weekday, self).__init__(wkday, n) + + +MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7)) + + +def _invalidates_cache(f): + """ + Decorator for rruleset methods which may invalidate the + cached length. 
+ """ + @wraps(f) + def inner_func(self, *args, **kwargs): + rv = f(self, *args, **kwargs) + self._invalidate_cache() + return rv + + return inner_func + + +class rrulebase(object): + def __init__(self, cache=False): + if cache: + self._cache = [] + self._cache_lock = _thread.allocate_lock() + self._invalidate_cache() + else: + self._cache = None + self._cache_complete = False + self._len = None + + def __iter__(self): + if self._cache_complete: + return iter(self._cache) + elif self._cache is None: + return self._iter() + else: + return self._iter_cached() + + def _invalidate_cache(self): + if self._cache is not None: + self._cache = [] + self._cache_complete = False + self._cache_gen = self._iter() + + if self._cache_lock.locked(): + self._cache_lock.release() + + self._len = None + + def _iter_cached(self): + i = 0 + gen = self._cache_gen + cache = self._cache + acquire = self._cache_lock.acquire + release = self._cache_lock.release + while gen: + if i == len(cache): + acquire() + if self._cache_complete: + break + try: + for j in range(10): + cache.append(advance_iterator(gen)) + except StopIteration: + self._cache_gen = gen = None + self._cache_complete = True + break + release() + yield cache[i] + i += 1 + while i < self._len: + yield cache[i] + i += 1 + + def __getitem__(self, item): + if self._cache_complete: + return self._cache[item] + elif isinstance(item, slice): + if item.step and item.step < 0: + return list(iter(self))[item] + else: + return list(itertools.islice(self, + item.start or 0, + item.stop or sys.maxsize, + item.step or 1)) + elif item >= 0: + gen = iter(self) + try: + for i in range(item+1): + res = advance_iterator(gen) + except StopIteration: + raise IndexError + return res + else: + return list(iter(self))[item] + + def __contains__(self, item): + if self._cache_complete: + return item in self._cache + else: + for i in self: + if i == item: + return True + elif i > item: + return False + return False + + # __len__() introduces a large 
performance penalty. + def count(self): + """ Returns the number of recurrences in this set. It will have go + through the whole recurrence, if this hasn't been done before. """ + if self._len is None: + for x in self: + pass + return self._len + + def before(self, dt, inc=False): + """ Returns the last recurrence before the given datetime instance. The + inc keyword defines what happens if dt is an occurrence. With + inc=True, if dt itself is an occurrence, it will be returned. """ + if self._cache_complete: + gen = self._cache + else: + gen = self + last = None + if inc: + for i in gen: + if i > dt: + break + last = i + else: + for i in gen: + if i >= dt: + break + last = i + return last + + def after(self, dt, inc=False): + """ Returns the first recurrence after the given datetime instance. The + inc keyword defines what happens if dt is an occurrence. With + inc=True, if dt itself is an occurrence, it will be returned. """ + if self._cache_complete: + gen = self._cache + else: + gen = self + if inc: + for i in gen: + if i >= dt: + return i + else: + for i in gen: + if i > dt: + return i + return None + + def xafter(self, dt, count=None, inc=False): + """ + Generator which yields up to `count` recurrences after the given + datetime instance, equivalent to `after`. + + :param dt: + The datetime at which to start generating recurrences. + + :param count: + The maximum number of recurrences to generate. If `None` (default), + dates are generated until the recurrence rule is exhausted. + + :param inc: + If `dt` is an instance of the rule and `inc` is `True`, it is + included in the output. + + :yields: Yields a sequence of `datetime` objects. 
+ """ + + if self._cache_complete: + gen = self._cache + else: + gen = self + + # Select the comparison function + if inc: + comp = lambda dc, dtc: dc >= dtc + else: + comp = lambda dc, dtc: dc > dtc + + # Generate dates + n = 0 + for d in gen: + if comp(d, dt): + if count is not None: + n += 1 + if n > count: + break + + yield d + + def between(self, after, before, inc=False, count=1): + """ Returns all the occurrences of the rrule between after and before. + The inc keyword defines what happens if after and/or before are + themselves occurrences. With inc=True, they will be included in the + list, if they are found in the recurrence set. """ + if self._cache_complete: + gen = self._cache + else: + gen = self + started = False + l = [] + if inc: + for i in gen: + if i > before: + break + elif not started: + if i >= after: + started = True + l.append(i) + else: + l.append(i) + else: + for i in gen: + if i >= before: + break + elif not started: + if i > after: + started = True + l.append(i) + else: + l.append(i) + return l + + +class rrule(rrulebase): + """ + That's the base of the rrule operation. It accepts all the keywords + defined in the RFC as its constructor parameters (except byday, + which was renamed to byweekday) and more. The constructor prototype is:: + + rrule(freq) + + Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY, + or SECONDLY. + + .. note:: + Per RFC section 3.3.10, recurrence instances falling on invalid dates + and times are ignored rather than coerced: + + Recurrence rules may generate recurrence instances with an invalid + date (e.g., February 30) or nonexistent local time (e.g., 1:30 AM + on a day where the local time is moved forward by an hour at 1:00 + AM). Such recurrence instances MUST be ignored and MUST NOT be + counted as part of the recurrence set. 
+ + This can lead to possibly surprising behavior when, for example, the + start date occurs at the end of the month: + + >>> from dateutil.rrule import rrule, MONTHLY + >>> from datetime import datetime + >>> start_date = datetime(2014, 12, 31) + >>> list(rrule(freq=MONTHLY, count=4, dtstart=start_date)) + ... # doctest: +NORMALIZE_WHITESPACE + [datetime.datetime(2014, 12, 31, 0, 0), + datetime.datetime(2015, 1, 31, 0, 0), + datetime.datetime(2015, 3, 31, 0, 0), + datetime.datetime(2015, 5, 31, 0, 0)] + + Additionally, it supports the following keyword arguments: + + :param dtstart: + The recurrence start. Besides being the base for the recurrence, + missing parameters in the final recurrence instances will also be + extracted from this date. If not given, datetime.now() will be used + instead. + :param interval: + The interval between each freq iteration. For example, when using + YEARLY, an interval of 2 means once every two years, but with HOURLY, + it means once every two hours. The default interval is 1. + :param wkst: + The week start day. Must be one of the MO, TU, WE constants, or an + integer, specifying the first day of the week. This will affect + recurrences based on weekly periods. The default week start is got + from calendar.firstweekday(), and may be modified by + calendar.setfirstweekday(). + :param count: + If given, this determines how many occurrences will be generated. + + .. note:: + As of version 2.5.0, the use of the keyword ``until`` in conjunction + with ``count`` is deprecated, to make sure ``dateutil`` is fully + compliant with `RFC-5545 Sec. 3.3.10 `_. Therefore, ``until`` and ``count`` + **must not** occur in the same call to ``rrule``. + :param until: + If given, this must be a datetime instance specifying the upper-bound + limit of the recurrence. The last recurrence in the rule is the greatest + datetime that is less than or equal to the value specified in the + ``until`` parameter. + + .. 
note:: + As of version 2.5.0, the use of the keyword ``until`` in conjunction + with ``count`` is deprecated, to make sure ``dateutil`` is fully + compliant with `RFC-5545 Sec. 3.3.10 `_. Therefore, ``until`` and ``count`` + **must not** occur in the same call to ``rrule``. + :param bysetpos: + If given, it must be either an integer, or a sequence of integers, + positive or negative. Each given integer will specify an occurrence + number, corresponding to the nth occurrence of the rule inside the + frequency period. For example, a bysetpos of -1 if combined with a + MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will + result in the last work day of every month. + :param bymonth: + If given, it must be either an integer, or a sequence of integers, + meaning the months to apply the recurrence to. + :param bymonthday: + If given, it must be either an integer, or a sequence of integers, + meaning the month days to apply the recurrence to. + :param byyearday: + If given, it must be either an integer, or a sequence of integers, + meaning the year days to apply the recurrence to. + :param byeaster: + If given, it must be either an integer, or a sequence of integers, + positive or negative. Each integer will define an offset from the + Easter Sunday. Passing the offset 0 to byeaster will yield the Easter + Sunday itself. This is an extension to the RFC specification. + :param byweekno: + If given, it must be either an integer, or a sequence of integers, + meaning the week numbers to apply the recurrence to. Week numbers + have the meaning described in ISO8601, that is, the first week of + the year is that containing at least four days of the new year. + :param byweekday: + If given, it must be either an integer (0 == MO), a sequence of + integers, one of the weekday constants (MO, TU, etc), or a sequence + of these constants. When given, these variables will define the + weekdays where the recurrence will be applied. 
It's also possible to + use an argument n for the weekday instances, which will mean the nth + occurrence of this weekday in the period. For example, with MONTHLY, + or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the + first friday of the month where the recurrence happens. Notice that in + the RFC documentation, this is specified as BYDAY, but was renamed to + avoid the ambiguity of that keyword. + :param byhour: + If given, it must be either an integer, or a sequence of integers, + meaning the hours to apply the recurrence to. + :param byminute: + If given, it must be either an integer, or a sequence of integers, + meaning the minutes to apply the recurrence to. + :param bysecond: + If given, it must be either an integer, or a sequence of integers, + meaning the seconds to apply the recurrence to. + :param cache: + If given, it must be a boolean value specifying to enable or disable + caching of results. If you will use the same rrule instance multiple + times, enabling caching will improve the performance considerably. 
+ """ + def __init__(self, freq, dtstart=None, + interval=1, wkst=None, count=None, until=None, bysetpos=None, + bymonth=None, bymonthday=None, byyearday=None, byeaster=None, + byweekno=None, byweekday=None, + byhour=None, byminute=None, bysecond=None, + cache=False): + super(rrule, self).__init__(cache) + global easter + if not dtstart: + if until and until.tzinfo: + dtstart = datetime.datetime.now(tz=until.tzinfo).replace(microsecond=0) + else: + dtstart = datetime.datetime.now().replace(microsecond=0) + elif not isinstance(dtstart, datetime.datetime): + dtstart = datetime.datetime.fromordinal(dtstart.toordinal()) + else: + dtstart = dtstart.replace(microsecond=0) + self._dtstart = dtstart + self._tzinfo = dtstart.tzinfo + self._freq = freq + self._interval = interval + self._count = count + + # Cache the original byxxx rules, if they are provided, as the _byxxx + # attributes do not necessarily map to the inputs, and this can be + # a problem in generating the strings. Only store things if they've + # been supplied (the string retrieval will just use .get()) + self._original_rule = {} + + if until and not isinstance(until, datetime.datetime): + until = datetime.datetime.fromordinal(until.toordinal()) + self._until = until + + if self._dtstart and self._until: + if (self._dtstart.tzinfo is not None) != (self._until.tzinfo is not None): + # According to RFC5545 Section 3.3.10: + # https://tools.ietf.org/html/rfc5545#section-3.3.10 + # + # > If the "DTSTART" property is specified as a date with UTC + # > time or a date with local time and time zone reference, + # > then the UNTIL rule part MUST be specified as a date with + # > UTC time. + raise ValueError( + 'RRULE UNTIL values must be specified in UTC when DTSTART ' + 'is timezone-aware' + ) + + if count is not None and until: + warn("Using both 'count' and 'until' is inconsistent with RFC 5545" + " and has been deprecated in dateutil. 
Future versions will " + "raise an error.", DeprecationWarning) + + if wkst is None: + self._wkst = calendar.firstweekday() + elif isinstance(wkst, integer_types): + self._wkst = wkst + else: + self._wkst = wkst.weekday + + if bysetpos is None: + self._bysetpos = None + elif isinstance(bysetpos, integer_types): + if bysetpos == 0 or not (-366 <= bysetpos <= 366): + raise ValueError("bysetpos must be between 1 and 366, " + "or between -366 and -1") + self._bysetpos = (bysetpos,) + else: + self._bysetpos = tuple(bysetpos) + for pos in self._bysetpos: + if pos == 0 or not (-366 <= pos <= 366): + raise ValueError("bysetpos must be between 1 and 366, " + "or between -366 and -1") + + if self._bysetpos: + self._original_rule['bysetpos'] = self._bysetpos + + if (byweekno is None and byyearday is None and bymonthday is None and + byweekday is None and byeaster is None): + if freq == YEARLY: + if bymonth is None: + bymonth = dtstart.month + self._original_rule['bymonth'] = None + bymonthday = dtstart.day + self._original_rule['bymonthday'] = None + elif freq == MONTHLY: + bymonthday = dtstart.day + self._original_rule['bymonthday'] = None + elif freq == WEEKLY: + byweekday = dtstart.weekday() + self._original_rule['byweekday'] = None + + # bymonth + if bymonth is None: + self._bymonth = None + else: + if isinstance(bymonth, integer_types): + bymonth = (bymonth,) + + self._bymonth = tuple(sorted(set(bymonth))) + + if 'bymonth' not in self._original_rule: + self._original_rule['bymonth'] = self._bymonth + + # byyearday + if byyearday is None: + self._byyearday = None + else: + if isinstance(byyearday, integer_types): + byyearday = (byyearday,) + + self._byyearday = tuple(sorted(set(byyearday))) + self._original_rule['byyearday'] = self._byyearday + + # byeaster + if byeaster is not None: + if not easter: + from dateutil import easter + if isinstance(byeaster, integer_types): + self._byeaster = (byeaster,) + else: + self._byeaster = tuple(sorted(byeaster)) + + 
self._original_rule['byeaster'] = self._byeaster + else: + self._byeaster = None + + # bymonthday + if bymonthday is None: + self._bymonthday = () + self._bynmonthday = () + else: + if isinstance(bymonthday, integer_types): + bymonthday = (bymonthday,) + + bymonthday = set(bymonthday) # Ensure it's unique + + self._bymonthday = tuple(sorted(x for x in bymonthday if x > 0)) + self._bynmonthday = tuple(sorted(x for x in bymonthday if x < 0)) + + # Storing positive numbers first, then negative numbers + if 'bymonthday' not in self._original_rule: + self._original_rule['bymonthday'] = tuple( + itertools.chain(self._bymonthday, self._bynmonthday)) + + # byweekno + if byweekno is None: + self._byweekno = None + else: + if isinstance(byweekno, integer_types): + byweekno = (byweekno,) + + self._byweekno = tuple(sorted(set(byweekno))) + + self._original_rule['byweekno'] = self._byweekno + + # byweekday / bynweekday + if byweekday is None: + self._byweekday = None + self._bynweekday = None + else: + # If it's one of the valid non-sequence types, convert to a + # single-element sequence before the iterator that builds the + # byweekday set. 
+ if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"): + byweekday = (byweekday,) + + self._byweekday = set() + self._bynweekday = set() + for wday in byweekday: + if isinstance(wday, integer_types): + self._byweekday.add(wday) + elif not wday.n or freq > MONTHLY: + self._byweekday.add(wday.weekday) + else: + self._bynweekday.add((wday.weekday, wday.n)) + + if not self._byweekday: + self._byweekday = None + elif not self._bynweekday: + self._bynweekday = None + + if self._byweekday is not None: + self._byweekday = tuple(sorted(self._byweekday)) + orig_byweekday = [weekday(x) for x in self._byweekday] + else: + orig_byweekday = () + + if self._bynweekday is not None: + self._bynweekday = tuple(sorted(self._bynweekday)) + orig_bynweekday = [weekday(*x) for x in self._bynweekday] + else: + orig_bynweekday = () + + if 'byweekday' not in self._original_rule: + self._original_rule['byweekday'] = tuple(itertools.chain( + orig_byweekday, orig_bynweekday)) + + # byhour + if byhour is None: + if freq < HOURLY: + self._byhour = {dtstart.hour} + else: + self._byhour = None + else: + if isinstance(byhour, integer_types): + byhour = (byhour,) + + if freq == HOURLY: + self._byhour = self.__construct_byset(start=dtstart.hour, + byxxx=byhour, + base=24) + else: + self._byhour = set(byhour) + + self._byhour = tuple(sorted(self._byhour)) + self._original_rule['byhour'] = self._byhour + + # byminute + if byminute is None: + if freq < MINUTELY: + self._byminute = {dtstart.minute} + else: + self._byminute = None + else: + if isinstance(byminute, integer_types): + byminute = (byminute,) + + if freq == MINUTELY: + self._byminute = self.__construct_byset(start=dtstart.minute, + byxxx=byminute, + base=60) + else: + self._byminute = set(byminute) + + self._byminute = tuple(sorted(self._byminute)) + self._original_rule['byminute'] = self._byminute + + # bysecond + if bysecond is None: + if freq < SECONDLY: + self._bysecond = ((dtstart.second,)) + else: + self._bysecond = None + 
else: + if isinstance(bysecond, integer_types): + bysecond = (bysecond,) + + self._bysecond = set(bysecond) + + if freq == SECONDLY: + self._bysecond = self.__construct_byset(start=dtstart.second, + byxxx=bysecond, + base=60) + else: + self._bysecond = set(bysecond) + + self._bysecond = tuple(sorted(self._bysecond)) + self._original_rule['bysecond'] = self._bysecond + + if self._freq >= HOURLY: + self._timeset = None + else: + self._timeset = [] + for hour in self._byhour: + for minute in self._byminute: + for second in self._bysecond: + self._timeset.append( + datetime.time(hour, minute, second, + tzinfo=self._tzinfo)) + self._timeset.sort() + self._timeset = tuple(self._timeset) + + def __str__(self): + """ + Output a string that would generate this RRULE if passed to rrulestr. + This is mostly compatible with RFC5545, except for the + dateutil-specific extension BYEASTER. + """ + + output = [] + h, m, s = [None] * 3 + if self._dtstart: + output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S')) + h, m, s = self._dtstart.timetuple()[3:6] + + parts = ['FREQ=' + FREQNAMES[self._freq]] + if self._interval != 1: + parts.append('INTERVAL=' + str(self._interval)) + + if self._wkst: + parts.append('WKST=' + repr(weekday(self._wkst))[0:2]) + + if self._count is not None: + parts.append('COUNT=' + str(self._count)) + + if self._until: + parts.append(self._until.strftime('UNTIL=%Y%m%dT%H%M%S')) + + if self._original_rule.get('byweekday') is not None: + # The str() method on weekday objects doesn't generate + # RFC5545-compliant strings, so we should modify that. 
+ original_rule = dict(self._original_rule) + wday_strings = [] + for wday in original_rule['byweekday']: + if wday.n: + wday_strings.append('{n:+d}{wday}'.format( + n=wday.n, + wday=repr(wday)[0:2])) + else: + wday_strings.append(repr(wday)) + + original_rule['byweekday'] = wday_strings + else: + original_rule = self._original_rule + + partfmt = '{name}={vals}' + for name, key in [('BYSETPOS', 'bysetpos'), + ('BYMONTH', 'bymonth'), + ('BYMONTHDAY', 'bymonthday'), + ('BYYEARDAY', 'byyearday'), + ('BYWEEKNO', 'byweekno'), + ('BYDAY', 'byweekday'), + ('BYHOUR', 'byhour'), + ('BYMINUTE', 'byminute'), + ('BYSECOND', 'bysecond'), + ('BYEASTER', 'byeaster')]: + value = original_rule.get(key) + if value: + parts.append(partfmt.format(name=name, vals=(','.join(str(v) + for v in value)))) + + output.append('RRULE:' + ';'.join(parts)) + return '\n'.join(output) + + def replace(self, **kwargs): + """Return new rrule with same attributes except for those attributes given new + values by whichever keyword arguments are specified.""" + new_kwargs = {"interval": self._interval, + "count": self._count, + "dtstart": self._dtstart, + "freq": self._freq, + "until": self._until, + "wkst": self._wkst, + "cache": False if self._cache is None else True } + new_kwargs.update(self._original_rule) + new_kwargs.update(kwargs) + return rrule(**new_kwargs) + + def _iter(self): + year, month, day, hour, minute, second, weekday, yearday, _ = \ + self._dtstart.timetuple() + + # Some local variables to speed things up a bit + freq = self._freq + interval = self._interval + wkst = self._wkst + until = self._until + bymonth = self._bymonth + byweekno = self._byweekno + byyearday = self._byyearday + byweekday = self._byweekday + byeaster = self._byeaster + bymonthday = self._bymonthday + bynmonthday = self._bynmonthday + bysetpos = self._bysetpos + byhour = self._byhour + byminute = self._byminute + bysecond = self._bysecond + + ii = _iterinfo(self) + ii.rebuild(year, month) + + getdayset = {YEARLY: 
ii.ydayset, + MONTHLY: ii.mdayset, + WEEKLY: ii.wdayset, + DAILY: ii.ddayset, + HOURLY: ii.ddayset, + MINUTELY: ii.ddayset, + SECONDLY: ii.ddayset}[freq] + + if freq < HOURLY: + timeset = self._timeset + else: + gettimeset = {HOURLY: ii.htimeset, + MINUTELY: ii.mtimeset, + SECONDLY: ii.stimeset}[freq] + if ((freq >= HOURLY and + self._byhour and hour not in self._byhour) or + (freq >= MINUTELY and + self._byminute and minute not in self._byminute) or + (freq >= SECONDLY and + self._bysecond and second not in self._bysecond)): + timeset = () + else: + timeset = gettimeset(hour, minute, second) + + total = 0 + count = self._count + while True: + # Get dayset with the right frequency + dayset, start, end = getdayset(year, month, day) + + # Do the "hard" work ;-) + filtered = False + for i in dayset[start:end]: + if ((bymonth and ii.mmask[i] not in bymonth) or + (byweekno and not ii.wnomask[i]) or + (byweekday and ii.wdaymask[i] not in byweekday) or + (ii.nwdaymask and not ii.nwdaymask[i]) or + (byeaster and not ii.eastermask[i]) or + ((bymonthday or bynmonthday) and + ii.mdaymask[i] not in bymonthday and + ii.nmdaymask[i] not in bynmonthday) or + (byyearday and + ((i < ii.yearlen and i+1 not in byyearday and + -ii.yearlen+i not in byyearday) or + (i >= ii.yearlen and i+1-ii.yearlen not in byyearday and + -ii.nextyearlen+i-ii.yearlen not in byyearday)))): + dayset[i] = None + filtered = True + + # Output results + if bysetpos and timeset: + poslist = [] + for pos in bysetpos: + if pos < 0: + daypos, timepos = divmod(pos, len(timeset)) + else: + daypos, timepos = divmod(pos-1, len(timeset)) + try: + i = [x for x in dayset[start:end] + if x is not None][daypos] + time = timeset[timepos] + except IndexError: + pass + else: + date = datetime.date.fromordinal(ii.yearordinal+i) + res = datetime.datetime.combine(date, time) + if res not in poslist: + poslist.append(res) + poslist.sort() + for res in poslist: + if until and res > until: + self._len = total + return + elif res 
>= self._dtstart: + if count is not None: + count -= 1 + if count < 0: + self._len = total + return + total += 1 + yield res + else: + for i in dayset[start:end]: + if i is not None: + date = datetime.date.fromordinal(ii.yearordinal + i) + for time in timeset: + res = datetime.datetime.combine(date, time) + if until and res > until: + self._len = total + return + elif res >= self._dtstart: + if count is not None: + count -= 1 + if count < 0: + self._len = total + return + + total += 1 + yield res + + # Handle frequency and interval + fixday = False + if freq == YEARLY: + year += interval + if year > datetime.MAXYEAR: + self._len = total + return + ii.rebuild(year, month) + elif freq == MONTHLY: + month += interval + if month > 12: + div, mod = divmod(month, 12) + month = mod + year += div + if month == 0: + month = 12 + year -= 1 + if year > datetime.MAXYEAR: + self._len = total + return + ii.rebuild(year, month) + elif freq == WEEKLY: + if wkst > weekday: + day += -(weekday+1+(6-wkst))+self._interval*7 + else: + day += -(weekday-wkst)+self._interval*7 + weekday = wkst + fixday = True + elif freq == DAILY: + day += interval + fixday = True + elif freq == HOURLY: + if filtered: + # Jump to one iteration before next day + hour += ((23-hour)//interval)*interval + + if byhour: + ndays, hour = self.__mod_distance(value=hour, + byxxx=self._byhour, + base=24) + else: + ndays, hour = divmod(hour+interval, 24) + + if ndays: + day += ndays + fixday = True + + timeset = gettimeset(hour, minute, second) + elif freq == MINUTELY: + if filtered: + # Jump to one iteration before next day + minute += ((1439-(hour*60+minute))//interval)*interval + + valid = False + rep_rate = (24*60) + for j in range(rep_rate // gcd(interval, rep_rate)): + if byminute: + nhours, minute = \ + self.__mod_distance(value=minute, + byxxx=self._byminute, + base=60) + else: + nhours, minute = divmod(minute+interval, 60) + + div, hour = divmod(hour+nhours, 24) + if div: + day += div + fixday = True + 
filtered = False + + if not byhour or hour in byhour: + valid = True + break + + if not valid: + raise ValueError('Invalid combination of interval and ' + + 'byhour resulting in empty rule.') + + timeset = gettimeset(hour, minute, second) + elif freq == SECONDLY: + if filtered: + # Jump to one iteration before next day + second += (((86399 - (hour * 3600 + minute * 60 + second)) + // interval) * interval) + + rep_rate = (24 * 3600) + valid = False + for j in range(0, rep_rate // gcd(interval, rep_rate)): + if bysecond: + nminutes, second = \ + self.__mod_distance(value=second, + byxxx=self._bysecond, + base=60) + else: + nminutes, second = divmod(second+interval, 60) + + div, minute = divmod(minute+nminutes, 60) + if div: + hour += div + div, hour = divmod(hour, 24) + if div: + day += div + fixday = True + + if ((not byhour or hour in byhour) and + (not byminute or minute in byminute) and + (not bysecond or second in bysecond)): + valid = True + break + + if not valid: + raise ValueError('Invalid combination of interval, ' + + 'byhour and byminute resulting in empty' + + ' rule.') + + timeset = gettimeset(hour, minute, second) + + if fixday and day > 28: + daysinmonth = calendar.monthrange(year, month)[1] + if day > daysinmonth: + while day > daysinmonth: + day -= daysinmonth + month += 1 + if month == 13: + month = 1 + year += 1 + if year > datetime.MAXYEAR: + self._len = total + return + daysinmonth = calendar.monthrange(year, month)[1] + ii.rebuild(year, month) + + def __construct_byset(self, start, byxxx, base): + """ + If a `BYXXX` sequence is passed to the constructor at the same level as + `FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some + specifications which cannot be reached given some starting conditions. 
+ + This occurs whenever the interval is not coprime with the base of a + given unit and the difference between the starting position and the + ending position is not coprime with the greatest common denominator + between the interval and the base. For example, with a FREQ of hourly + starting at 17:00 and an interval of 4, the only valid values for + BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not + coprime. + + :param start: + Specifies the starting position. + :param byxxx: + An iterable containing the list of allowed values. + :param base: + The largest allowable value for the specified frequency (e.g. + 24 hours, 60 minutes). + + This does not preserve the type of the iterable, returning a set, since + the values should be unique and the order is irrelevant, this will + speed up later lookups. + + In the event of an empty set, raises a :exception:`ValueError`, as this + results in an empty rrule. + """ + + cset = set() + + # Support a single byxxx value. + if isinstance(byxxx, integer_types): + byxxx = (byxxx, ) + + for num in byxxx: + i_gcd = gcd(self._interval, base) + # Use divmod rather than % because we need to wrap negative nums. + if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0: + cset.add(num) + + if len(cset) == 0: + raise ValueError("Invalid rrule byxxx generates an empty set.") + + return cset + + def __mod_distance(self, value, byxxx, base): + """ + Calculates the next value in a sequence where the `FREQ` parameter is + specified along with a `BYXXX` parameter at the same "level" + (e.g. `HOURLY` specified with `BYHOUR`). + + :param value: + The old value of the component. + :param byxxx: + The `BYXXX` set, which should have been generated by + `rrule._construct_byset`, or something else which checks that a + valid rule is present. + :param base: + The largest allowable value for the specified frequency (e.g. + 24 hours, 60 minutes). 
+ + If a valid value is not found after `base` iterations (the maximum + number before the sequence would start to repeat), this raises a + :exception:`ValueError`, as no valid values were found. + + This returns a tuple of `divmod(n*interval, base)`, where `n` is the + smallest number of `interval` repetitions until the next specified + value in `byxxx` is found. + """ + accumulator = 0 + for ii in range(1, base + 1): + # Using divmod() over % to account for negative intervals + div, value = divmod(value + self._interval, base) + accumulator += div + if value in byxxx: + return (accumulator, value) + + +class _iterinfo(object): + __slots__ = ["rrule", "lastyear", "lastmonth", + "yearlen", "nextyearlen", "yearordinal", "yearweekday", + "mmask", "mrange", "mdaymask", "nmdaymask", + "wdaymask", "wnomask", "nwdaymask", "eastermask"] + + def __init__(self, rrule): + for attr in self.__slots__: + setattr(self, attr, None) + self.rrule = rrule + + def rebuild(self, year, month): + # Every mask is 7 days longer to handle cross-year weekly periods. 
+ rr = self.rrule + if year != self.lastyear: + self.yearlen = 365 + calendar.isleap(year) + self.nextyearlen = 365 + calendar.isleap(year + 1) + firstyday = datetime.date(year, 1, 1) + self.yearordinal = firstyday.toordinal() + self.yearweekday = firstyday.weekday() + + wday = datetime.date(year, 1, 1).weekday() + if self.yearlen == 365: + self.mmask = M365MASK + self.mdaymask = MDAY365MASK + self.nmdaymask = NMDAY365MASK + self.wdaymask = WDAYMASK[wday:] + self.mrange = M365RANGE + else: + self.mmask = M366MASK + self.mdaymask = MDAY366MASK + self.nmdaymask = NMDAY366MASK + self.wdaymask = WDAYMASK[wday:] + self.mrange = M366RANGE + + if not rr._byweekno: + self.wnomask = None + else: + self.wnomask = [0]*(self.yearlen+7) + # no1wkst = firstwkst = self.wdaymask.index(rr._wkst) + no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7 + if no1wkst >= 4: + no1wkst = 0 + # Number of days in the year, plus the days we got + # from last year. + wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7 + else: + # Number of days in the year, minus the days we + # left in last year. + wyearlen = self.yearlen-no1wkst + div, mod = divmod(wyearlen, 7) + numweeks = div+mod//4 + for n in rr._byweekno: + if n < 0: + n += numweeks+1 + if not (0 < n <= numweeks): + continue + if n > 1: + i = no1wkst+(n-1)*7 + if no1wkst != firstwkst: + i -= 7-firstwkst + else: + i = no1wkst + for j in range(7): + self.wnomask[i] = 1 + i += 1 + if self.wdaymask[i] == rr._wkst: + break + if 1 in rr._byweekno: + # Check week number 1 of next year as well + # TODO: Check -numweeks for next year. + i = no1wkst+numweeks*7 + if no1wkst != firstwkst: + i -= 7-firstwkst + if i < self.yearlen: + # If week starts in next year, we + # don't care about it. + for j in range(7): + self.wnomask[i] = 1 + i += 1 + if self.wdaymask[i] == rr._wkst: + break + if no1wkst: + # Check last week number of last year as + # well. 
If no1wkst is 0, either the year + # started on week start, or week number 1 + # got days from last year, so there are no + # days from last year's last week number in + # this year. + if -1 not in rr._byweekno: + lyearweekday = datetime.date(year-1, 1, 1).weekday() + lno1wkst = (7-lyearweekday+rr._wkst) % 7 + lyearlen = 365+calendar.isleap(year-1) + if lno1wkst >= 4: + lno1wkst = 0 + lnumweeks = 52+(lyearlen + + (lyearweekday-rr._wkst) % 7) % 7//4 + else: + lnumweeks = 52+(self.yearlen-no1wkst) % 7//4 + else: + lnumweeks = -1 + if lnumweeks in rr._byweekno: + for i in range(no1wkst): + self.wnomask[i] = 1 + + if (rr._bynweekday and (month != self.lastmonth or + year != self.lastyear)): + ranges = [] + if rr._freq == YEARLY: + if rr._bymonth: + for month in rr._bymonth: + ranges.append(self.mrange[month-1:month+1]) + else: + ranges = [(0, self.yearlen)] + elif rr._freq == MONTHLY: + ranges = [self.mrange[month-1:month+1]] + if ranges: + # Weekly frequency won't get here, so we may not + # care about cross-year weekly periods. + self.nwdaymask = [0]*self.yearlen + for first, last in ranges: + last -= 1 + for wday, n in rr._bynweekday: + if n < 0: + i = last+(n+1)*7 + i -= (self.wdaymask[i]-wday) % 7 + else: + i = first+(n-1)*7 + i += (7-self.wdaymask[i]+wday) % 7 + if first <= i <= last: + self.nwdaymask[i] = 1 + + if rr._byeaster: + self.eastermask = [0]*(self.yearlen+7) + eyday = easter.easter(year).toordinal()-self.yearordinal + for offset in rr._byeaster: + self.eastermask[eyday+offset] = 1 + + self.lastyear = year + self.lastmonth = month + + def ydayset(self, year, month, day): + return list(range(self.yearlen)), 0, self.yearlen + + def mdayset(self, year, month, day): + dset = [None]*self.yearlen + start, end = self.mrange[month-1:month+1] + for i in range(start, end): + dset[i] = i + return dset, start, end + + def wdayset(self, year, month, day): + # We need to handle cross-year weeks here. 
+ dset = [None]*(self.yearlen+7) + i = datetime.date(year, month, day).toordinal()-self.yearordinal + start = i + for j in range(7): + dset[i] = i + i += 1 + # if (not (0 <= i < self.yearlen) or + # self.wdaymask[i] == self.rrule._wkst): + # This will cross the year boundary, if necessary. + if self.wdaymask[i] == self.rrule._wkst: + break + return dset, start, i + + def ddayset(self, year, month, day): + dset = [None] * self.yearlen + i = datetime.date(year, month, day).toordinal() - self.yearordinal + dset[i] = i + return dset, i, i + 1 + + def htimeset(self, hour, minute, second): + tset = [] + rr = self.rrule + for minute in rr._byminute: + for second in rr._bysecond: + tset.append(datetime.time(hour, minute, second, + tzinfo=rr._tzinfo)) + tset.sort() + return tset + + def mtimeset(self, hour, minute, second): + tset = [] + rr = self.rrule + for second in rr._bysecond: + tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo)) + tset.sort() + return tset + + def stimeset(self, hour, minute, second): + return (datetime.time(hour, minute, second, + tzinfo=self.rrule._tzinfo),) + + +class rruleset(rrulebase): + """ The rruleset type allows more complex recurrence setups, mixing + multiple rules, dates, exclusion rules, and exclusion dates. The type + constructor takes the following keyword arguments: + + :param cache: If True, caching of results will be enabled, improving + performance of multiple queries considerably. 
""" + + class _genitem(object): + def __init__(self, genlist, gen): + try: + self.dt = advance_iterator(gen) + genlist.append(self) + except StopIteration: + pass + self.genlist = genlist + self.gen = gen + + def __next__(self): + try: + self.dt = advance_iterator(self.gen) + except StopIteration: + if self.genlist[0] is self: + heapq.heappop(self.genlist) + else: + self.genlist.remove(self) + heapq.heapify(self.genlist) + + next = __next__ + + def __lt__(self, other): + return self.dt < other.dt + + def __gt__(self, other): + return self.dt > other.dt + + def __eq__(self, other): + return self.dt == other.dt + + def __ne__(self, other): + return self.dt != other.dt + + def __init__(self, cache=False): + super(rruleset, self).__init__(cache) + self._rrule = [] + self._rdate = [] + self._exrule = [] + self._exdate = [] + + @_invalidates_cache + def rrule(self, rrule): + """ Include the given :py:class:`rrule` instance in the recurrence set + generation. """ + self._rrule.append(rrule) + + @_invalidates_cache + def rdate(self, rdate): + """ Include the given :py:class:`datetime` instance in the recurrence + set generation. """ + self._rdate.append(rdate) + + @_invalidates_cache + def exrule(self, exrule): + """ Include the given rrule instance in the recurrence set exclusion + list. Dates which are part of the given recurrence rules will not + be generated, even if some inclusive rrule or rdate matches them. + """ + self._exrule.append(exrule) + + @_invalidates_cache + def exdate(self, exdate): + """ Include the given datetime instance in the recurrence set + exclusion list. Dates included that way will not be generated, + even if some inclusive rrule or rdate matches them. 
""" + self._exdate.append(exdate) + + def _iter(self): + rlist = [] + self._rdate.sort() + self._genitem(rlist, iter(self._rdate)) + for gen in [iter(x) for x in self._rrule]: + self._genitem(rlist, gen) + exlist = [] + self._exdate.sort() + self._genitem(exlist, iter(self._exdate)) + for gen in [iter(x) for x in self._exrule]: + self._genitem(exlist, gen) + lastdt = None + total = 0 + heapq.heapify(rlist) + heapq.heapify(exlist) + while rlist: + ritem = rlist[0] + if not lastdt or lastdt != ritem.dt: + while exlist and exlist[0] < ritem: + exitem = exlist[0] + advance_iterator(exitem) + if exlist and exlist[0] is exitem: + heapq.heapreplace(exlist, exitem) + if not exlist or ritem != exlist[0]: + total += 1 + yield ritem.dt + lastdt = ritem.dt + advance_iterator(ritem) + if rlist and rlist[0] is ritem: + heapq.heapreplace(rlist, ritem) + self._len = total + + + + +class _rrulestr(object): + """ Parses a string representation of a recurrence rule or set of + recurrence rules. + + :param s: + Required, a string defining one or more recurrence rules. + + :param dtstart: + If given, used as the default recurrence start if not specified in the + rule string. + + :param cache: + If set ``True`` caching of results will be enabled, improving + performance of multiple queries considerably. + + :param unfold: + If set ``True`` indicates that a rule string is split over more + than one line and should be joined before processing. + + :param forceset: + If set ``True`` forces a :class:`dateutil.rrule.rruleset` to + be returned. + + :param compatible: + If set ``True`` forces ``unfold`` and ``forceset`` to be ``True``. + + :param ignoretz: + If set ``True``, time zones in parsed strings are ignored and a naive + :class:`datetime.datetime` object is returned. + + :param tzids: + If given, a callable or mapping used to retrieve a + :class:`datetime.tzinfo` from a string representation. + Defaults to :func:`dateutil.tz.gettz`. 
+ + :param tzinfos: + Additional time zone names / aliases which may be present in a string + representation. See :func:`dateutil.parser.parse` for more + information. + + :return: + Returns a :class:`dateutil.rrule.rruleset` or + :class:`dateutil.rrule.rrule` + """ + + _freq_map = {"YEARLY": YEARLY, + "MONTHLY": MONTHLY, + "WEEKLY": WEEKLY, + "DAILY": DAILY, + "HOURLY": HOURLY, + "MINUTELY": MINUTELY, + "SECONDLY": SECONDLY} + + _weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3, + "FR": 4, "SA": 5, "SU": 6} + + def _handle_int(self, rrkwargs, name, value, **kwargs): + rrkwargs[name.lower()] = int(value) + + def _handle_int_list(self, rrkwargs, name, value, **kwargs): + rrkwargs[name.lower()] = [int(x) for x in value.split(',')] + + _handle_INTERVAL = _handle_int + _handle_COUNT = _handle_int + _handle_BYSETPOS = _handle_int_list + _handle_BYMONTH = _handle_int_list + _handle_BYMONTHDAY = _handle_int_list + _handle_BYYEARDAY = _handle_int_list + _handle_BYEASTER = _handle_int_list + _handle_BYWEEKNO = _handle_int_list + _handle_BYHOUR = _handle_int_list + _handle_BYMINUTE = _handle_int_list + _handle_BYSECOND = _handle_int_list + + def _handle_FREQ(self, rrkwargs, name, value, **kwargs): + rrkwargs["freq"] = self._freq_map[value] + + def _handle_UNTIL(self, rrkwargs, name, value, **kwargs): + global parser + if not parser: + from dateutil import parser + try: + rrkwargs["until"] = parser.parse(value, + ignoretz=kwargs.get("ignoretz"), + tzinfos=kwargs.get("tzinfos")) + except ValueError: + raise ValueError("invalid until date") + + def _handle_WKST(self, rrkwargs, name, value, **kwargs): + rrkwargs["wkst"] = self._weekday_map[value] + + def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs): + """ + Two ways to specify this: +1MO or MO(+1) + """ + l = [] + for wday in value.split(','): + if '(' in wday: + # If it's of the form TH(+1), etc. 
+ splt = wday.split('(') + w = splt[0] + n = int(splt[1][:-1]) + elif len(wday): + # If it's of the form +1MO + for i in range(len(wday)): + if wday[i] not in '+-0123456789': + break + n = wday[:i] or None + w = wday[i:] + if n: + n = int(n) + else: + raise ValueError("Invalid (empty) BYDAY specification.") + + l.append(weekdays[self._weekday_map[w]](n)) + rrkwargs["byweekday"] = l + + _handle_BYDAY = _handle_BYWEEKDAY + + def _parse_rfc_rrule(self, line, + dtstart=None, + cache=False, + ignoretz=False, + tzinfos=None): + if line.find(':') != -1: + name, value = line.split(':') + if name != "RRULE": + raise ValueError("unknown parameter name") + else: + value = line + rrkwargs = {} + for pair in value.split(';'): + name, value = pair.split('=') + name = name.upper() + value = value.upper() + try: + getattr(self, "_handle_"+name)(rrkwargs, name, value, + ignoretz=ignoretz, + tzinfos=tzinfos) + except AttributeError: + raise ValueError("unknown parameter '%s'" % name) + except (KeyError, ValueError): + raise ValueError("invalid '%s': %s" % (name, value)) + return rrule(dtstart=dtstart, cache=cache, **rrkwargs) + + def _parse_date_value(self, date_value, parms, rule_tzids, + ignoretz, tzids, tzinfos): + global parser + if not parser: + from dateutil import parser + + datevals = [] + value_found = False + TZID = None + + for parm in parms: + if parm.startswith("TZID="): + try: + tzkey = rule_tzids[parm.split('TZID=')[-1]] + except KeyError: + continue + if tzids is None: + from . import tz + tzlookup = tz.gettz + elif callable(tzids): + tzlookup = tzids + else: + tzlookup = getattr(tzids, 'get', None) + if tzlookup is None: + msg = ('tzids must be a callable, mapping, or None, ' + 'not %s' % tzids) + raise ValueError(msg) + + TZID = tzlookup(tzkey) + continue + + # RFC 5445 3.8.2.4: The VALUE parameter is optional, but may be found + # only once. 
+ if parm not in {"VALUE=DATE-TIME", "VALUE=DATE"}: + raise ValueError("unsupported parm: " + parm) + else: + if value_found: + msg = ("Duplicate value parameter found in: " + parm) + raise ValueError(msg) + value_found = True + + for datestr in date_value.split(','): + date = parser.parse(datestr, ignoretz=ignoretz, tzinfos=tzinfos) + if TZID is not None: + if date.tzinfo is None: + date = date.replace(tzinfo=TZID) + else: + raise ValueError('DTSTART/EXDATE specifies multiple timezone') + datevals.append(date) + + return datevals + + def _parse_rfc(self, s, + dtstart=None, + cache=False, + unfold=False, + forceset=False, + compatible=False, + ignoretz=False, + tzids=None, + tzinfos=None): + global parser + if compatible: + forceset = True + unfold = True + + TZID_NAMES = dict(map( + lambda x: (x.upper(), x), + re.findall('TZID=(?P[^:]+):', s) + )) + s = s.upper() + if not s.strip(): + raise ValueError("empty string") + if unfold: + lines = s.splitlines() + i = 0 + while i < len(lines): + line = lines[i].rstrip() + if not line: + del lines[i] + elif i > 0 and line[0] == " ": + lines[i-1] += line[1:] + del lines[i] + else: + i += 1 + else: + lines = s.split() + if (not forceset and len(lines) == 1 and (s.find(':') == -1 or + s.startswith('RRULE:'))): + return self._parse_rfc_rrule(lines[0], cache=cache, + dtstart=dtstart, ignoretz=ignoretz, + tzinfos=tzinfos) + else: + rrulevals = [] + rdatevals = [] + exrulevals = [] + exdatevals = [] + for line in lines: + if not line: + continue + if line.find(':') == -1: + name = "RRULE" + value = line + else: + name, value = line.split(':', 1) + parms = name.split(';') + if not parms: + raise ValueError("empty property name") + name = parms[0] + parms = parms[1:] + if name == "RRULE": + for parm in parms: + raise ValueError("unsupported RRULE parm: "+parm) + rrulevals.append(value) + elif name == "RDATE": + for parm in parms: + if parm != "VALUE=DATE-TIME": + raise ValueError("unsupported RDATE parm: "+parm) + 
rdatevals.append(value) + elif name == "EXRULE": + for parm in parms: + raise ValueError("unsupported EXRULE parm: "+parm) + exrulevals.append(value) + elif name == "EXDATE": + exdatevals.extend( + self._parse_date_value(value, parms, + TZID_NAMES, ignoretz, + tzids, tzinfos) + ) + elif name == "DTSTART": + dtvals = self._parse_date_value(value, parms, TZID_NAMES, + ignoretz, tzids, tzinfos) + if len(dtvals) != 1: + raise ValueError("Multiple DTSTART values specified:" + + value) + dtstart = dtvals[0] + else: + raise ValueError("unsupported property: "+name) + if (forceset or len(rrulevals) > 1 or rdatevals + or exrulevals or exdatevals): + if not parser and (rdatevals or exdatevals): + from dateutil import parser + rset = rruleset(cache=cache) + for value in rrulevals: + rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in rdatevals: + for datestr in value.split(','): + rset.rdate(parser.parse(datestr, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in exrulevals: + rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in exdatevals: + rset.exdate(value) + if compatible and dtstart: + rset.rdate(dtstart) + return rset + else: + return self._parse_rfc_rrule(rrulevals[0], + dtstart=dtstart, + cache=cache, + ignoretz=ignoretz, + tzinfos=tzinfos) + + def __call__(self, s, **kwargs): + return self._parse_rfc(s, **kwargs) + + +rrulestr = _rrulestr() + +# vim:ts=4:sw=4:et diff --git a/llmeval-env/lib/python3.10/site-packages/isympy.py b/llmeval-env/lib/python3.10/site-packages/isympy.py new file mode 100644 index 0000000000000000000000000000000000000000..50e9bc78d08904b8c177105ee90d984ea4b01d20 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/isympy.py @@ -0,0 +1,342 @@ +""" +Python shell for SymPy. 
+ +This is just a normal Python shell (IPython shell if you have the +IPython package installed), that executes the following commands for +the user: + + >>> from __future__ import division + >>> from sympy import * + >>> x, y, z, t = symbols('x y z t') + >>> k, m, n = symbols('k m n', integer=True) + >>> f, g, h = symbols('f g h', cls=Function) + >>> init_printing() + +So starting 'isympy' is equivalent to starting Python (or IPython) and +executing the above commands by hand. It is intended for easy and quick +experimentation with SymPy. isympy is a good way to use SymPy as an +interactive calculator. If you have IPython and Matplotlib installed, then +interactive plotting is enabled by default. + +COMMAND LINE OPTIONS +-------------------- + +-c CONSOLE, --console=CONSOLE + + Use the specified shell (Python or IPython) shell as the console + backend instead of the default one (IPython if present, Python + otherwise), e.g.: + + $isympy -c python + + CONSOLE must be one of 'ipython' or 'python' + +-p PRETTY, --pretty PRETTY + + Setup pretty-printing in SymPy. When pretty-printing is enabled, + expressions can be printed with Unicode or ASCII. The default is + to use pretty-printing (with Unicode if the terminal supports it). + When this option is 'no', expressions will not be pretty-printed + and ASCII will be used: + + $isympy -p no + + PRETTY must be one of 'unicode', 'ascii', or 'no' + +-t TYPES, --types=TYPES + + Setup the ground types for the polys. By default, gmpy ground types + are used if gmpy2 or gmpy is installed, otherwise it falls back to python + ground types, which are a little bit slower. You can manually + choose python ground types even if gmpy is installed (e.g., for + testing purposes): + + $isympy -t python + + TYPES must be one of 'gmpy', 'gmpy1' or 'python' + + Note that the ground type gmpy1 is primarily intended for testing; it + forces the use of gmpy version 1 even if gmpy2 is available. 
+ + This is the same as setting the environment variable + SYMPY_GROUND_TYPES to the given ground type (e.g., + SYMPY_GROUND_TYPES='gmpy') + + The ground types can be determined interactively from the variable + sympy.polys.domains.GROUND_TYPES. + +-o ORDER, --order ORDER + + Setup the ordering of terms for printing. The default is lex, which + orders terms lexicographically (e.g., x**2 + x + 1). You can choose + other orderings, such as rev-lex, which will use reverse + lexicographic ordering (e.g., 1 + x + x**2): + + $isympy -o rev-lex + + ORDER must be one of 'lex', 'rev-lex', 'grlex', 'rev-grlex', + 'grevlex', 'rev-grevlex', 'old', or 'none'. + + Note that for very large expressions, ORDER='none' may speed up + printing considerably but the terms will have no canonical order. + +-q, --quiet + + Print only Python's and SymPy's versions to stdout at startup. + +-d, --doctest + + Use the same format that should be used for doctests. This is + equivalent to -c python -p no. + +-C, --no-cache + + Disable the caching mechanism. Disabling the cache may slow certain + operations down considerably. This is useful for testing the cache, + or for benchmarking, as the cache can result in deceptive timings. + + This is equivalent to setting the environment variable + SYMPY_USE_CACHE to 'no'. + +-a, --auto-symbols (requires at least IPython 0.11) + + Automatically create missing symbols. Normally, typing a name of a + Symbol that has not been instantiated first would raise NameError, + but with this option enabled, any undefined name will be + automatically created as a Symbol. + + Note that this is intended only for interactive, calculator style + usage. In a script that uses SymPy, Symbols should be instantiated + at the top, so that it's clear what they are. + + This will not override any names that are already defined, which + includes the single character letters represented by the mnemonic + QCOSINE (see the "Gotchas and Pitfalls" document in the + documentation). 
You can delete existing names by executing "del + name". If a name is defined, typing "'name' in dir()" will return True. + + The Symbols that are created using this have default assumptions. + If you want to place assumptions on symbols, you should create them + using symbols() or var(). + + Finally, this only works in the top level namespace. So, for + example, if you define a function in isympy with an undefined + Symbol, it will not work. + + See also the -i and -I options. + +-i, --int-to-Integer (requires at least IPython 0.11) + + Automatically wrap int literals with Integer. This makes it so that + things like 1/2 will come out as Rational(1, 2), rather than 0.5. This + works by preprocessing the source and wrapping all int literals with + Integer. Note that this will not change the behavior of int literals + assigned to variables, and it also won't change the behavior of functions + that return int literals. + + If you want an int, you can wrap the literal in int(), e.g. int(3)/int(2) + gives 1.5 (with division imported from __future__). + +-I, --interactive (requires at least IPython 0.11) + + This is equivalent to --auto-symbols --int-to-Integer. Future options + designed for ease of interactive use may be added to this. + +-D, --debug + + Enable debugging output. This is the same as setting the + environment variable SYMPY_DEBUG to 'True'. The debug status is set + in the variable SYMPY_DEBUG within isympy. + +-- IPython options + + Additionally you can pass command line options directly to the IPython + interpreter (the standard Python shell is not supported). However you + need to add the '--' separator between two types of options, e.g the + startup banner option and the colors option. You need to enter the + options as required by the version of IPython that you are using, too: + + in IPython 0.11, + + $isympy -q -- --colors=NoColor + + or older versions of IPython, + + $isympy -q -- -colors NoColor + +See also isympy --help. 
+""" + +import os +import sys + +# DO NOT IMPORT SYMPY HERE! Or the setting of the sympy environment variables +# by the command line will break. + +def main() -> None: + from argparse import ArgumentParser, RawDescriptionHelpFormatter + + VERSION = None + if '--version' in sys.argv: + # We cannot import sympy before this is run, because flags like -C and + # -t set environment variables that must be set before SymPy is + # imported. The only thing we need to import it for is to get the + # version, which only matters with the --version flag. + import sympy + VERSION = sympy.__version__ + + usage = 'isympy [options] -- [ipython options]' + parser = ArgumentParser( + usage=usage, + description=__doc__, + formatter_class=RawDescriptionHelpFormatter, + ) + + parser.add_argument('--version', action='version', version=VERSION) + + parser.add_argument( + '-c', '--console', + dest='console', + action='store', + default=None, + choices=['ipython', 'python'], + metavar='CONSOLE', + help='select type of interactive session: ipython | python; defaults ' + 'to ipython if IPython is installed, otherwise python') + + parser.add_argument( + '-p', '--pretty', + dest='pretty', + action='store', + default=None, + metavar='PRETTY', + choices=['unicode', 'ascii', 'no'], + help='setup pretty printing: unicode | ascii | no; defaults to ' + 'unicode printing if the terminal supports it, otherwise ascii') + + parser.add_argument( + '-t', '--types', + dest='types', + action='store', + default=None, + metavar='TYPES', + choices=['gmpy', 'gmpy1', 'python'], + help='setup ground types: gmpy | gmpy1 | python; defaults to gmpy if gmpy2 ' + 'or gmpy is installed, otherwise python') + + parser.add_argument( + '-o', '--order', + dest='order', + action='store', + default=None, + metavar='ORDER', + choices=['lex', 'grlex', 'grevlex', 'rev-lex', 'rev-grlex', 'rev-grevlex', 'old', 'none'], + help='setup ordering of terms: [rev-]lex | [rev-]grlex | [rev-]grevlex | old | none; defaults to lex') + + 
parser.add_argument( + '-q', '--quiet', + dest='quiet', + action='store_true', + default=False, + help='print only version information at startup') + + parser.add_argument( + '-d', '--doctest', + dest='doctest', + action='store_true', + default=False, + help='use the doctest format for output (you can just copy and paste it)') + + parser.add_argument( + '-C', '--no-cache', + dest='cache', + action='store_false', + default=True, + help='disable caching mechanism') + + parser.add_argument( + '-a', '--auto-symbols', + dest='auto_symbols', + action='store_true', + default=False, + help='automatically construct missing symbols') + + parser.add_argument( + '-i', '--int-to-Integer', + dest='auto_int_to_Integer', + action='store_true', + default=False, + help="automatically wrap int literals with Integer") + + parser.add_argument( + '-I', '--interactive', + dest='interactive', + action='store_true', + default=False, + help="equivalent to -a -i") + + parser.add_argument( + '-D', '--debug', + dest='debug', + action='store_true', + default=False, + help='enable debugging output') + + (options, ipy_args) = parser.parse_known_args() + if '--' in ipy_args: + ipy_args.remove('--') + + if not options.cache: + os.environ['SYMPY_USE_CACHE'] = 'no' + + if options.types: + os.environ['SYMPY_GROUND_TYPES'] = options.types + + if options.debug: + os.environ['SYMPY_DEBUG'] = str(options.debug) + + if options.doctest: + options.pretty = 'no' + options.console = 'python' + + session = options.console + + if session is not None: + ipython = session == 'ipython' + else: + try: + import IPython + ipython = True + except ImportError: + if not options.quiet: + from sympy.interactive.session import no_ipython + print(no_ipython) + ipython = False + + args = { + 'pretty_print': True, + 'use_unicode': None, + 'use_latex': None, + 'order': None, + 'argv': ipy_args, + } + + if options.pretty == 'unicode': + args['use_unicode'] = True + elif options.pretty == 'ascii': + args['use_unicode'] = False + 
elif options.pretty == 'no': + args['pretty_print'] = False + + if options.order is not None: + args['order'] = options.order + + args['quiet'] = options.quiet + args['auto_symbols'] = options.auto_symbols or options.interactive + args['auto_int_to_Integer'] = options.auto_int_to_Integer or options.interactive + + from sympy.interactive import init_session + init_session(ipython, **args) + +if __name__ == "__main__": + main() diff --git a/llmeval-env/lib/python3.10/site-packages/pip/py.typed b/llmeval-env/lib/python3.10/site-packages/pip/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..493b53e4e7a3984ddd49780313bf3bd9901dc1e0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pip/py.typed @@ -0,0 +1,4 @@ +pip is a command line program. While it is implemented in Python, and so is +available for import, you must not use pip's internal APIs in this way. Typing +information is provided as a convenience only and is not a guarantee. Expect +unannounced changes to the API and types in releases. 
diff --git a/llmeval-env/lib/python3.10/site-packages/sqlitedict.py b/llmeval-env/lib/python3.10/site-packages/sqlitedict.py new file mode 100644 index 0000000000000000000000000000000000000000..3e65724a2f1d32c8105f7ccae7eafe54f6c2ba5d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/sqlitedict.py @@ -0,0 +1,697 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# This code is distributed under the terms and conditions +# from the Apache License, Version 2.0 +# +# http://opensource.org/licenses/apache2.0.php +# +# This code was inspired by: +# * http://code.activestate.com/recipes/576638-draft-for-an-sqlite3-based-dbm/ +# * http://code.activestate.com/recipes/526618/ + +""" +A lightweight wrapper around Python's sqlite3 database, with a dict-like interface +and multi-thread access support:: + +>>> mydict = SqliteDict('some.db', autocommit=True) # the mapping will be persisted to file `some.db` +>>> mydict['some_key'] = any_picklable_object +>>> print mydict['some_key'] +>>> print len(mydict) # etc... all dict functions work + +Pickle is used internally to serialize the values. Keys are strings. + +If you don't use autocommit (default is no autocommit for performance), then +don't forget to call `mydict.commit()` when done with a transaction. 
+ +""" + +import sqlite3 +import os +import sys +import tempfile +import threading +import logging +import traceback +from base64 import b64decode, b64encode +import weakref + +__version__ = '2.1.0' + + +def reraise(tp, value, tb=None): + if value is None: + value = tp() + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + + +try: + from cPickle import dumps, loads, HIGHEST_PROTOCOL as PICKLE_PROTOCOL +except ImportError: + from pickle import dumps, loads, HIGHEST_PROTOCOL as PICKLE_PROTOCOL + +# some Python 3 vs 2 imports +try: + from collections import UserDict as DictClass +except ImportError: + from UserDict import DictMixin as DictClass + +try: + from queue import Queue +except ImportError: + from Queue import Queue + + +logger = logging.getLogger(__name__) + +# +# There's a thread that holds the actual SQL connection (SqliteMultithread). +# We communicate with this thread via queues (request and responses). +# The requests can either be SQL commands or one of the "special" commands +# below: +# +# _REQUEST_CLOSE: request that the SQL connection be closed +# _REQUEST_COMMIT: request that any changes be committed to the DB +# +# Responses are either SQL records (e.g. results of a SELECT) or the magic +# _RESPONSE_NO_MORE command, which indicates nothing else will ever be written +# to the response queue. +# +_REQUEST_CLOSE = '--close--' +_REQUEST_COMMIT = '--commit--' +_RESPONSE_NO_MORE = '--no more--' + +# +# We work with weak references for better memory efficiency. +# Dereferencing, checking the referent queue still exists, and putting to it +# is boring and repetitive, so we have a _put function to handle it for us. 
+# +_PUT_OK, _PUT_REFERENT_DESTROYED, _PUT_NOOP = 0, 1, 2 + + +def _put(queue_reference, item): + if queue_reference is not None: + queue = queue_reference() + if queue is None: + # + # We got a reference to a queue, but that queue no longer exists + # + retval = _PUT_REFERENT_DESTROYED + else: + queue.put(item) + retval = _PUT_OK + + del queue + return retval + + # + # We didn't get a reference to a queue, so do nothing (no-op). + # + return _PUT_NOOP + + +def open(*args, **kwargs): + """See documentation of the SqliteDict class.""" + return SqliteDict(*args, **kwargs) + + +def encode(obj): + """Serialize an object using pickle to a binary format accepted by SQLite.""" + return sqlite3.Binary(dumps(obj, protocol=PICKLE_PROTOCOL)) + + +def decode(obj): + """Deserialize objects retrieved from SQLite.""" + return loads(bytes(obj)) + + +def encode_key(key): + """Serialize a key using pickle + base64 encoding to text accepted by SQLite.""" + return b64encode(dumps(key, protocol=PICKLE_PROTOCOL)).decode("ascii") + + +def decode_key(key): + """Deserialize a key retrieved from SQLite.""" + return loads(b64decode(key.encode("ascii"))) + + +def identity(obj): + """Identity f(x) = x function for encoding/decoding.""" + return obj + + +class SqliteDict(DictClass): + VALID_FLAGS = ['c', 'r', 'w', 'n'] + + def __init__(self, filename=None, tablename='unnamed', flag='c', + autocommit=False, journal_mode="DELETE", encode=encode, + decode=decode, encode_key=identity, decode_key=identity, + timeout=5, outer_stack=True): + """ + Initialize a thread-safe sqlite-backed dictionary. The dictionary will + be a table `tablename` in database file `filename`. A single file (=database) + may contain multiple tables. + + If no `filename` is given, a random file in temp will be used (and deleted + from temp once the dict is closed/deleted). + + If you enable `autocommit`, changes will be committed after each operation + (more inefficient but safer). 
Otherwise, changes are committed on `self.commit()`, + `self.clear()` and `self.close()`. + + Set `journal_mode` to 'OFF' if you're experiencing sqlite I/O problems + or if you need performance and don't care about crash-consistency. + + Set `outer_stack` to False to disable the output of the outer exception + to the error logs. This may improve the efficiency of sqlitedict + operation at the expense of a detailed exception trace. + + The `flag` parameter. Exactly one of: + 'c': default mode, open for read/write, creating the db/table if necessary. + 'w': open for r/w, but drop `tablename` contents first (start with empty table) + 'r': open as read-only + 'n': create a new database (erasing any existing tables, not just `tablename`!). + + The `encode` and `decode` parameters are used to customize how the values + are serialized and deserialized. + The `encode` parameter must be a function that takes a single Python + object and returns a serialized representation. + The `decode` function must be a function that takes the serialized + representation produced by `encode` and returns a deserialized Python + object. + The default is to use pickle. + + The `timeout` defines the maximum time (in seconds) to wait for initial Thread startup. + + """ + self.in_temp = filename is None + if self.in_temp: + fd, filename = tempfile.mkstemp(prefix='sqldict') + os.close(fd) + + if flag not in SqliteDict.VALID_FLAGS: + raise RuntimeError("Unrecognized flag: %s" % flag) + self.flag = flag + + if flag == 'n': + if os.path.exists(filename): + os.remove(filename) + + dirname = os.path.dirname(filename) + if dirname: + if not os.path.exists(dirname): + raise RuntimeError('Error! The directory does not exist, %s' % dirname) + + self.filename = filename + + # Use standard SQL escaping of double quote characters in identifiers, by doubling them. 
+ # See https://github.com/RaRe-Technologies/sqlitedict/pull/113 + self.tablename = tablename.replace('"', '""') + + self.autocommit = autocommit + self.journal_mode = journal_mode + self.encode = encode + self.decode = decode + self.encode_key = encode_key + self.decode_key = decode_key + self._outer_stack = outer_stack + + logger.debug("opening Sqlite table %r in %r" % (tablename, filename)) + self.conn = self._new_conn() + if self.flag == 'r': + if self.tablename not in SqliteDict.get_tablenames(self.filename): + msg = 'Refusing to create a new table "%s" in read-only DB mode' % tablename + raise RuntimeError(msg) + else: + MAKE_TABLE = 'CREATE TABLE IF NOT EXISTS "%s" (key TEXT PRIMARY KEY, value BLOB)' % self.tablename + self.conn.execute(MAKE_TABLE) + self.conn.commit() + if flag == 'w': + self.clear() + + def _new_conn(self): + return SqliteMultithread( + self.filename, + autocommit=self.autocommit, + journal_mode=self.journal_mode, + outer_stack=self._outer_stack, + ) + + def __enter__(self): + if not hasattr(self, 'conn') or self.conn is None: + self.conn = self._new_conn() + return self + + def __exit__(self, *exc_info): + self.close() + + def __str__(self): + return "SqliteDict(%s)" % (self.filename) + + def __repr__(self): + return str(self) # no need of something complex + + def __len__(self): + # `select count (*)` is super slow in sqlite (does a linear scan!!) + # As a result, len() is very slow too once the table size grows beyond trivial. + # We could keep the total count of rows ourselves, by means of triggers, + # but that seems too complicated and would slow down normal operation + # (insert/delete etc). 
+ GET_LEN = 'SELECT COUNT(*) FROM "%s"' % self.tablename + rows = self.conn.select_one(GET_LEN)[0] + return rows if rows is not None else 0 + + def __bool__(self): + # No elements is False, otherwise True + GET_MAX = 'SELECT MAX(ROWID) FROM "%s"' % self.tablename + m = self.conn.select_one(GET_MAX)[0] + # Explicit better than implicit and bla bla + return True if m is not None else False + + def iterkeys(self): + GET_KEYS = 'SELECT key FROM "%s" ORDER BY rowid' % self.tablename + for key in self.conn.select(GET_KEYS): + yield self.decode_key(key[0]) + + def itervalues(self): + GET_VALUES = 'SELECT value FROM "%s" ORDER BY rowid' % self.tablename + for value in self.conn.select(GET_VALUES): + yield self.decode(value[0]) + + def iteritems(self): + GET_ITEMS = 'SELECT key, value FROM "%s" ORDER BY rowid' % self.tablename + for key, value in self.conn.select(GET_ITEMS): + yield self.decode_key(key), self.decode(value) + + def keys(self): + return self.iterkeys() + + def values(self): + return self.itervalues() + + def items(self): + return self.iteritems() + + def __contains__(self, key): + HAS_ITEM = 'SELECT 1 FROM "%s" WHERE key = ?' % self.tablename + return self.conn.select_one(HAS_ITEM, (self.encode_key(key),)) is not None + + def __getitem__(self, key): + GET_ITEM = 'SELECT value FROM "%s" WHERE key = ?' 
% self.tablename + item = self.conn.select_one(GET_ITEM, (self.encode_key(key),)) + if item is None: + raise KeyError(key) + return self.decode(item[0]) + + def __setitem__(self, key, value): + if self.flag == 'r': + raise RuntimeError('Refusing to write to read-only SqliteDict') + + ADD_ITEM = 'REPLACE INTO "%s" (key, value) VALUES (?,?)' % self.tablename + self.conn.execute(ADD_ITEM, (self.encode_key(key), self.encode(value))) + if self.autocommit: + self.commit() + + def __delitem__(self, key): + if self.flag == 'r': + raise RuntimeError('Refusing to delete from read-only SqliteDict') + + if key not in self: + raise KeyError(key) + DEL_ITEM = 'DELETE FROM "%s" WHERE key = ?' % self.tablename + self.conn.execute(DEL_ITEM, (self.encode_key(key),)) + if self.autocommit: + self.commit() + + def update(self, items=(), **kwds): + if self.flag == 'r': + raise RuntimeError('Refusing to update read-only SqliteDict') + + try: + items = items.items() + except AttributeError: + pass + items = [(self.encode_key(k), self.encode(v)) for k, v in items] + + UPDATE_ITEMS = 'REPLACE INTO "%s" (key, value) VALUES (?, ?)' % self.tablename + self.conn.executemany(UPDATE_ITEMS, items) + if kwds: + self.update(kwds) + if self.autocommit: + self.commit() + + def __iter__(self): + return self.iterkeys() + + def clear(self): + if self.flag == 'r': + raise RuntimeError('Refusing to clear read-only SqliteDict') + + # avoid VACUUM, as it gives "OperationalError: database schema has changed" + CLEAR_ALL = 'DELETE FROM "%s";' % self.tablename + self.conn.commit() + self.conn.execute(CLEAR_ALL) + self.conn.commit() + + @staticmethod + def get_tablenames(filename): + """get the names of the tables in an sqlite db as a list""" + if not os.path.isfile(filename): + raise IOError('file %s does not exist' % (filename)) + GET_TABLENAMES = 'SELECT name FROM sqlite_master WHERE type="table"' + with sqlite3.connect(filename) as conn: + cursor = conn.execute(GET_TABLENAMES) + res = cursor.fetchall() + + 
return [name[0] for name in res] + + def commit(self, blocking=True): + """ + Persist all data to disk. + + When `blocking` is False, the commit command is queued, but the data is + not guaranteed persisted (default implication when autocommit=True). + """ + if self.conn is not None: + self.conn.commit(blocking) + sync = commit + + def close(self, do_log=True, force=False): + if do_log: + logger.debug("closing %s" % self) + if hasattr(self, 'conn') and self.conn is not None: + if self.conn.autocommit and not force: + # typically calls to commit are non-blocking when autocommit is + # used. However, we need to block on close() to ensure any + # awaiting exceptions are handled and that all data is + # persisted to disk before returning. + self.conn.commit(blocking=True) + self.conn.close(force=force) + self.conn = None + if self.in_temp: + try: + os.remove(self.filename) + except Exception: + pass + + def terminate(self): + """Delete the underlying database file. Use with care.""" + if self.flag == 'r': + raise RuntimeError('Refusing to terminate read-only SqliteDict') + + self.close() + + if self.filename == ':memory:': + return + + logger.info("deleting %s" % self.filename) + try: + if os.path.isfile(self.filename): + os.remove(self.filename) + except (OSError, IOError): + logger.exception("failed to delete %s" % (self.filename)) + + def __del__(self): + # like close(), but assume globals are gone by now (do not log!) + try: + self.close(do_log=False, force=True) + except Exception: + # prevent error log flood in case of multiple SqliteDicts + # closed after connection lost (exceptions are always ignored + # in __del__ method. + pass + + +class SqliteMultithread(threading.Thread): + """ + Wrap sqlite connection in a way that allows concurrent requests from multiple threads. + + This is done by internally queueing the requests and processing them sequentially + in a separate thread (in the same order they arrived). 
+ + """ + def __init__(self, filename, autocommit, journal_mode, outer_stack=True): + super(SqliteMultithread, self).__init__() + self.filename = filename + self.autocommit = autocommit + self.journal_mode = journal_mode + # use request queue of unlimited size + self.reqs = Queue() + self.daemon = True + self._outer_stack = outer_stack + self.log = logging.getLogger('sqlitedict.SqliteMultithread') + + # + # Parts of this object's state get accessed from different threads, so + # we use synchronization to avoid race conditions. For example, + # .exception gets set inside the new daemon thread that we spawned, but + # gets read from the main thread. This is particularly important + # during initialization: the Thread needs some time to actually start + # working, and until this happens, any calls to e.g. + # check_raise_error() will prematurely return None, meaning all is + # well. If the that connection happens to fail, we'll never know about + # it, and instead wait for a result that never arrives (effectively, + # deadlocking). Locking solves this problem by eliminating the race + # condition. + # + self._lock = threading.Lock() + self._lock.acquire() + self.exception = None + + self.start() + + def _connect(self): + """Connect to the underlying database. + + Raises an exception on failure. Returns the connection and cursor on success. 
+ """ + try: + if self.autocommit: + conn = sqlite3.connect(self.filename, isolation_level=None, check_same_thread=False) + else: + conn = sqlite3.connect(self.filename, check_same_thread=False) + except Exception: + self.log.exception("Failed to initialize connection for filename: %s" % self.filename) + self.exception = sys.exc_info() + raise + + try: + conn.execute('PRAGMA journal_mode = %s' % self.journal_mode) + conn.text_factory = str + cursor = conn.cursor() + conn.commit() + cursor.execute('PRAGMA synchronous=OFF') + except Exception: + self.log.exception("Failed to execute PRAGMA statements.") + self.exception = sys.exc_info() + raise + + return conn, cursor + + def run(self): + # + # Nb. this is what actually runs inside the new daemon thread. + # self._lock is locked at this stage - see the initializer function. + # + try: + conn, cursor = self._connect() + finally: + self._lock.release() + + res_ref = None + while True: + # + # req: an SQL command or one of the --magic-- commands we use internally + # arg: arguments for the command + # res_ref: a weak reference to the queue into which responses must be placed + # outer_stack: the outer stack, for producing more informative traces in case of error + # + req, arg, res_ref, outer_stack = self.reqs.get() + + if req == _REQUEST_CLOSE: + assert res_ref, ('--close-- without return queue', res_ref) + break + elif req == _REQUEST_COMMIT: + conn.commit() + _put(res_ref, _RESPONSE_NO_MORE) + else: + try: + cursor.execute(req, arg) + except Exception: + with self._lock: + self.exception = (e_type, e_value, e_tb) = sys.exc_info() + + inner_stack = traceback.extract_stack() + + # An exception occurred in our thread, but we may not + # immediately able to throw it in our calling thread, if it has + # no return `res` queue: log as level ERROR both the inner and + # outer exception immediately. 
+ # + # Any iteration of res.get() or any next call will detect the + # inner exception and re-raise it in the calling Thread; though + # it may be confusing to see an exception for an unrelated + # statement, an ERROR log statement from the 'sqlitedict.*' + # namespace contains the original outer stack location. + self.log.error('Inner exception:') + for item in traceback.format_list(inner_stack): + self.log.error(item) + self.log.error('') # deliniate traceback & exception w/blank line + for item in traceback.format_exception_only(e_type, e_value): + self.log.error(item) + + self.log.error('') # exception & outer stack w/blank line + + if self._outer_stack: + self.log.error('Outer stack:') + for item in traceback.format_list(outer_stack): + self.log.error(item) + self.log.error('Exception will be re-raised at next call.') + else: + self.log.error( + 'Unable to show the outer stack. Pass ' + 'outer_stack=True when initializing the ' + 'SqliteDict instance to show the outer stack.' + ) + + if res_ref: + for rec in cursor: + if _put(res_ref, rec) == _PUT_REFERENT_DESTROYED: + # + # The queue we are sending responses to got garbage + # collected. Nobody is listening anymore, so we + # stop sending responses. + # + break + + _put(res_ref, _RESPONSE_NO_MORE) + + if self.autocommit: + conn.commit() + + self.log.debug('received: %s, send: --no more--', req) + conn.close() + + _put(res_ref, _RESPONSE_NO_MORE) + + def check_raise_error(self): + """ + Check for and raise exception for any previous sqlite query. + + For the `execute*` family of method calls, such calls are non-blocking and any + exception raised in the thread cannot be handled by the calling Thread (usually + MainThread). This method is called on `close`, and prior to any subsequent + calls to the `execute*` methods to check for and raise an exception in a + previous call to the MainThread. 
+ """ + with self._lock: + if self.exception: + e_type, e_value, e_tb = self.exception + + # clear self.exception, if the caller decides to handle such + # exception, we should not repeatedly re-raise it. + self.exception = None + + self.log.error('An exception occurred from a previous statement, view ' + 'the logging namespace "sqlitedict" for outer stack.') + + # The third argument to raise is the traceback object, and it is + # substituted instead of the current location as the place where + # the exception occurred, this is so that when using debuggers such + # as `pdb', or simply evaluating the naturally raised traceback, we + # retain the original (inner) location of where the exception + # occurred. + reraise(e_type, e_value, e_tb) + + def execute(self, req, arg=None, res=None): + """ + `execute` calls are non-blocking: just queue up the request and return immediately. + + :param req: The request (an SQL command) + :param arg: Arguments to the SQL command + :param res: A queue in which to place responses as they become available + """ + self.check_raise_error() + stack = None + + if self._outer_stack: + # NOTE: This might be a lot of information to pump into an input + # queue, affecting performance. I've also seen earlier versions of + # jython take a severe performance impact for throwing exceptions + # so often. + stack = traceback.extract_stack()[:-1] + + # + # We pass a weak reference to the response queue instead of a regular + # reference, because we want the queues to be garbage-collected + # more aggressively. + # + res_ref = None + if res: + res_ref = weakref.ref(res) + + self.reqs.put((req, arg or tuple(), res_ref, stack)) + + def executemany(self, req, items): + for item in items: + self.execute(req, item) + self.check_raise_error() + + def select(self, req, arg=None): + """ + Unlike sqlite's native select, this select doesn't handle iteration efficiently. 
+ + The result of `select` starts filling up with values as soon as the + request is dequeued, and although you can iterate over the result normally + (`for res in self.select(): ...`), the entire result will be in memory. + """ + res = Queue() # results of the select will appear as items in this queue + self.execute(req, arg, res) + while True: + rec = res.get() + self.check_raise_error() + if rec == _RESPONSE_NO_MORE: + break + yield rec + + def select_one(self, req, arg=None): + """Return only the first row of the SELECT, or None if there are no matching rows.""" + try: + return next(iter(self.select(req, arg))) + except StopIteration: + return None + + def commit(self, blocking=True): + if blocking: + # by default, we await completion of commit() unless + # blocking=False. This ensures any available exceptions for any + # previous statement are thrown before returning, and that the + # data has actually persisted to disk! + self.select_one(_REQUEST_COMMIT) + else: + # otherwise, we fire and forget as usual. + self.execute(_REQUEST_COMMIT) + + def close(self, force=False): + if force: + # If a SqliteDict is being killed or garbage-collected, then select_one() + # could hang forever because run() might already have exited and therefore + # can't process the request. Instead, push the close command to the requests + # queue directly. If run() is still alive, it will exit gracefully. If not, + # then there's nothing we can do anyway. 
+ self.reqs.put((_REQUEST_CLOSE, None, weakref.ref(Queue()), None)) + else: + # we abuse 'select' to "iter" over a "--close--" statement so that we + # can confirm the completion of close before joining the thread and + # returning (by semaphore '--no more--' + self.select_one(_REQUEST_CLOSE) + self.join() + + +# +# This is here for .github/workflows/release.yml +# +if __name__ == '__main__': + print(__version__) diff --git a/llmeval-env/lib/python3.10/site-packages/threadpoolctl.py b/llmeval-env/lib/python3.10/site-packages/threadpoolctl.py new file mode 100644 index 0000000000000000000000000000000000000000..2a72d1b57e1d764d73052583d3c5a8e8b7697eae --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/threadpoolctl.py @@ -0,0 +1,1280 @@ +"""threadpoolctl + +This module provides utilities to introspect native libraries that relies on +thread pools (notably BLAS and OpenMP implementations) and dynamically set the +maximal number of threads they can use. +""" + +# License: BSD 3-Clause + +# The code to introspect dynamically loaded libraries on POSIX systems is +# adapted from code by Intel developer @anton-malakhov available at +# https://github.com/IntelPython/smp (Copyright (c) 2017, Intel Corporation) +# and also published under the BSD 3-Clause license +import os +import re +import sys +import ctypes +import itertools +import textwrap +from typing import final +import warnings +from ctypes.util import find_library +from abc import ABC, abstractmethod +from functools import lru_cache +from contextlib import ContextDecorator + +__version__ = "3.5.0" +__all__ = [ + "threadpool_limits", + "threadpool_info", + "ThreadpoolController", + "LibController", + "register", +] + + +# One can get runtime errors or even segfaults due to multiple OpenMP libraries +# loaded simultaneously which can happen easily in Python when importing and +# using compiled extensions built with different compilers and therefore +# different OpenMP runtimes in the same program. 
In particular libiomp (used by +# Intel ICC) and libomp used by clang/llvm tend to crash. This can happen for +# instance when calling BLAS inside a prange. Setting the following environment +# variable allows multiple OpenMP libraries to be loaded. It should not degrade +# performances since we manually take care of potential over-subscription +# performance issues, in sections of the code where nested OpenMP loops can +# happen, by dynamically reconfiguring the inner OpenMP runtime to temporarily +# disable it while under the scope of the outer OpenMP parallel section. +os.environ.setdefault("KMP_DUPLICATE_LIB_OK", "True") + +# Structure to cast the info on dynamically loaded library. See +# https://linux.die.net/man/3/dl_iterate_phdr for more details. +_SYSTEM_UINT = ctypes.c_uint64 if sys.maxsize > 2**32 else ctypes.c_uint32 +_SYSTEM_UINT_HALF = ctypes.c_uint32 if sys.maxsize > 2**32 else ctypes.c_uint16 + + +class _dl_phdr_info(ctypes.Structure): + _fields_ = [ + ("dlpi_addr", _SYSTEM_UINT), # Base address of object + ("dlpi_name", ctypes.c_char_p), # path to the library + ("dlpi_phdr", ctypes.c_void_p), # pointer on dlpi_headers + ("dlpi_phnum", _SYSTEM_UINT_HALF), # number of elements in dlpi_phdr + ] + + +# The RTLD_NOLOAD flag for loading shared libraries is not defined on Windows. +try: + _RTLD_NOLOAD = os.RTLD_NOLOAD +except AttributeError: + _RTLD_NOLOAD = ctypes.DEFAULT_MODE + + +class LibController(ABC): + """Abstract base class for the individual library controllers + + A library controller must expose the following class attributes: + - user_api : str + Usually the name of the library or generic specification the library + implements, e.g. "blas" is a specification with different implementations. + - internal_api : str + Usually the name of the library or concrete implementation of some + specification, e.g. "openblas" is an implementation of the "blas" + specification. 
+ - filename_prefixes : tuple + Possible prefixes of the shared library's filename that allow to + identify the library. e.g. "libopenblas" for libopenblas.so. + + and implement the following methods: `get_num_threads`, `set_num_threads` and + `get_version`. + + Threadpoolctl loops through all the loaded shared libraries and tries to match + the filename of each library with the `filename_prefixes`. If a match is found, a + controller is instantiated and a handler to the library is stored in the `dynlib` + attribute as a `ctypes.CDLL` object. It can be used to access the necessary symbols + of the shared library to implement the above methods. + + The following information will be exposed in the info dictionary: + - user_api : standardized API, if any, or a copy of internal_api. + - internal_api : implementation-specific API. + - num_threads : the current thread limit. + - prefix : prefix of the shared library's filename. + - filepath : path to the loaded shared library. + - version : version of the library (if available). + + In addition, each library controller may expose internal API specific entries. They + must be set as attributes in the `set_additional_attributes` method. 
+ """ + + @final + def __init__(self, *, filepath=None, prefix=None, parent=None): + """This is not meant to be overriden by subclasses.""" + self.parent = parent + self.prefix = prefix + self.filepath = filepath + self.dynlib = ctypes.CDLL(filepath, mode=_RTLD_NOLOAD) + self._symbol_prefix, self._symbol_suffix = self._find_affixes() + self.version = self.get_version() + self.set_additional_attributes() + + def info(self): + """Return relevant info wrapped in a dict""" + hidden_attrs = ("dynlib", "parent", "_symbol_prefix", "_symbol_suffix") + return { + "user_api": self.user_api, + "internal_api": self.internal_api, + "num_threads": self.num_threads, + **{k: v for k, v in vars(self).items() if k not in hidden_attrs}, + } + + def set_additional_attributes(self): + """Set additional attributes meant to be exposed in the info dict""" + + @property + def num_threads(self): + """Exposes the current thread limit as a dynamic property + + This is not meant to be used or overriden by subclasses. 
+ """ + return self.get_num_threads() + + @abstractmethod + def get_num_threads(self): + """Return the maximum number of threads available to use""" + + @abstractmethod + def set_num_threads(self, num_threads): + """Set the maximum number of threads to use""" + + @abstractmethod + def get_version(self): + """Return the version of the shared library""" + + def _find_affixes(self): + """Return the affixes for the symbols of the shared library""" + return "", "" + + def _get_symbol(self, name): + """Return the symbol of the shared library accounding for the affixes""" + return getattr( + self.dynlib, f"{self._symbol_prefix}{name}{self._symbol_suffix}", None + ) + + +class OpenBLASController(LibController): + """Controller class for OpenBLAS""" + + user_api = "blas" + internal_api = "openblas" + filename_prefixes = ("libopenblas", "libblas", "libscipy_openblas") + + _symbol_prefixes = ("", "scipy_") + _symbol_suffixes = ("", "64_", "_64") + + # All variations of "openblas_get_num_threads", accounting for the affixes + check_symbols = tuple( + f"{prefix}openblas_get_num_threads{suffix}" + for prefix, suffix in itertools.product(_symbol_prefixes, _symbol_suffixes) + ) + + def _find_affixes(self): + for prefix, suffix in itertools.product( + self._symbol_prefixes, self._symbol_suffixes + ): + if hasattr(self.dynlib, f"{prefix}openblas_get_num_threads{suffix}"): + return prefix, suffix + + def set_additional_attributes(self): + self.threading_layer = self._get_threading_layer() + self.architecture = self._get_architecture() + + def get_num_threads(self): + get_num_threads_func = self._get_symbol("openblas_get_num_threads") + if get_num_threads_func is not None: + return get_num_threads_func() + return None + + def set_num_threads(self, num_threads): + set_num_threads_func = self._get_symbol("openblas_set_num_threads") + if set_num_threads_func is not None: + return set_num_threads_func(num_threads) + return None + + def get_version(self): + # None means OpenBLAS is not 
loaded or version < 0.3.4, since OpenBLAS + # did not expose its version before that. + get_version_func = self._get_symbol("openblas_get_config") + if get_version_func is not None: + get_version_func.restype = ctypes.c_char_p + config = get_version_func().split() + if config[0] == b"OpenBLAS": + return config[1].decode("utf-8") + return None + return None + + def _get_threading_layer(self): + """Return the threading layer of OpenBLAS""" + get_threading_layer_func = self._get_symbol("openblas_get_parallel") + if get_threading_layer_func is not None: + threading_layer = get_threading_layer_func() + if threading_layer == 2: + return "openmp" + elif threading_layer == 1: + return "pthreads" + return "disabled" + return "unknown" + + def _get_architecture(self): + """Return the architecture detected by OpenBLAS""" + get_architecture_func = self._get_symbol("openblas_get_corename") + if get_architecture_func is not None: + get_architecture_func.restype = ctypes.c_char_p + return get_architecture_func().decode("utf-8") + return None + + +class BLISController(LibController): + """Controller class for BLIS""" + + user_api = "blas" + internal_api = "blis" + filename_prefixes = ("libblis", "libblas") + check_symbols = ( + "bli_thread_get_num_threads", + "bli_thread_set_num_threads", + "bli_info_get_version_str", + "bli_info_get_enable_openmp", + "bli_info_get_enable_pthreads", + "bli_arch_query_id", + "bli_arch_string", + ) + + def set_additional_attributes(self): + self.threading_layer = self._get_threading_layer() + self.architecture = self._get_architecture() + + def get_num_threads(self): + get_func = getattr(self.dynlib, "bli_thread_get_num_threads", lambda: None) + num_threads = get_func() + # by default BLIS is single-threaded and get_num_threads + # returns -1. We map it to 1 for consistency with other libraries. 
+ return 1 if num_threads == -1 else num_threads + + def set_num_threads(self, num_threads): + set_func = getattr( + self.dynlib, "bli_thread_set_num_threads", lambda num_threads: None + ) + return set_func(num_threads) + + def get_version(self): + get_version_ = getattr(self.dynlib, "bli_info_get_version_str", None) + if get_version_ is None: + return None + + get_version_.restype = ctypes.c_char_p + return get_version_().decode("utf-8") + + def _get_threading_layer(self): + """Return the threading layer of BLIS""" + if getattr(self.dynlib, "bli_info_get_enable_openmp", lambda: False)(): + return "openmp" + elif getattr(self.dynlib, "bli_info_get_enable_pthreads", lambda: False)(): + return "pthreads" + return "disabled" + + def _get_architecture(self): + """Return the architecture detected by BLIS""" + bli_arch_query_id = getattr(self.dynlib, "bli_arch_query_id", None) + bli_arch_string = getattr(self.dynlib, "bli_arch_string", None) + if bli_arch_query_id is None or bli_arch_string is None: + return None + + # the true restype should be BLIS' arch_t (enum) but int should work + # for us: + bli_arch_query_id.restype = ctypes.c_int + bli_arch_string.restype = ctypes.c_char_p + return bli_arch_string(bli_arch_query_id()).decode("utf-8") + + +class FlexiBLASController(LibController): + """Controller class for FlexiBLAS""" + + user_api = "blas" + internal_api = "flexiblas" + filename_prefixes = ("libflexiblas",) + check_symbols = ( + "flexiblas_get_num_threads", + "flexiblas_set_num_threads", + "flexiblas_get_version", + "flexiblas_list", + "flexiblas_list_loaded", + "flexiblas_current_backend", + ) + + @property + def loaded_backends(self): + return self._get_backend_list(loaded=True) + + @property + def current_backend(self): + return self._get_current_backend() + + def info(self): + """Return relevant info wrapped in a dict""" + # We override the info method because the loaded and current backends + # are dynamic properties + exposed_attrs = super().info() + 
exposed_attrs["loaded_backends"] = self.loaded_backends + exposed_attrs["current_backend"] = self.current_backend + + return exposed_attrs + + def set_additional_attributes(self): + self.available_backends = self._get_backend_list(loaded=False) + + def get_num_threads(self): + get_func = getattr(self.dynlib, "flexiblas_get_num_threads", lambda: None) + num_threads = get_func() + # by default BLIS is single-threaded and get_num_threads + # returns -1. We map it to 1 for consistency with other libraries. + return 1 if num_threads == -1 else num_threads + + def set_num_threads(self, num_threads): + set_func = getattr( + self.dynlib, "flexiblas_set_num_threads", lambda num_threads: None + ) + return set_func(num_threads) + + def get_version(self): + get_version_ = getattr(self.dynlib, "flexiblas_get_version", None) + if get_version_ is None: + return None + + major = ctypes.c_int() + minor = ctypes.c_int() + patch = ctypes.c_int() + get_version_(ctypes.byref(major), ctypes.byref(minor), ctypes.byref(patch)) + return f"{major.value}.{minor.value}.{patch.value}" + + def _get_backend_list(self, loaded=False): + """Return the list of available backends for FlexiBLAS. + + If loaded is False, return the list of available backends from the FlexiBLAS + configuration. If loaded is True, return the list of actually loaded backends. + """ + func_name = f"flexiblas_list{'_loaded' if loaded else ''}" + get_backend_list_ = getattr(self.dynlib, func_name, None) + if get_backend_list_ is None: + return None + + n_backends = get_backend_list_(None, 0, 0) + + backends = [] + for i in range(n_backends): + backend_name = ctypes.create_string_buffer(1024) + get_backend_list_(backend_name, 1024, i) + if backend_name.value.decode("utf-8") != "__FALLBACK__": + # We don't know when to expect __FALLBACK__ but it is not a real + # backend and does not show up when running flexiblas list. 
+ backends.append(backend_name.value.decode("utf-8")) + return backends + + def _get_current_backend(self): + """Return the backend of FlexiBLAS""" + get_backend_ = getattr(self.dynlib, "flexiblas_current_backend", None) + if get_backend_ is None: + return None + + backend = ctypes.create_string_buffer(1024) + get_backend_(backend, ctypes.sizeof(backend)) + return backend.value.decode("utf-8") + + def switch_backend(self, backend): + """Switch the backend of FlexiBLAS + + Parameters + ---------- + backend : str + The name or the path to the shared library of the backend to switch to. If + the backend is not already loaded, it will be loaded first. + """ + if backend not in self.loaded_backends: + if backend in self.available_backends: + load_func = getattr(self.dynlib, "flexiblas_load_backend", lambda _: -1) + else: # assume backend is a path to a shared library + load_func = getattr( + self.dynlib, "flexiblas_load_backend_library", lambda _: -1 + ) + res = load_func(str(backend).encode("utf-8")) + if res == -1: + raise RuntimeError( + f"Failed to load backend {backend!r}. It must either be the name of" + " a backend available in the FlexiBLAS configuration " + f"{self.available_backends} or the path to a valid shared library." + ) + + # Trigger a new search of loaded shared libraries since loading a new + # backend caused a dlopen. 
+ self.parent._load_libraries() + + switch_func = getattr(self.dynlib, "flexiblas_switch", lambda _: -1) + idx = self.loaded_backends.index(backend) + res = switch_func(idx) + if res == -1: + raise RuntimeError(f"Failed to switch to backend {backend!r}.") + + +class MKLController(LibController): + """Controller class for MKL""" + + user_api = "blas" + internal_api = "mkl" + filename_prefixes = ("libmkl_rt", "mkl_rt", "libblas") + check_symbols = ( + "MKL_Get_Max_Threads", + "MKL_Set_Num_Threads", + "MKL_Get_Version_String", + "MKL_Set_Threading_Layer", + ) + + def set_additional_attributes(self): + self.threading_layer = self._get_threading_layer() + + def get_num_threads(self): + get_func = getattr(self.dynlib, "MKL_Get_Max_Threads", lambda: None) + return get_func() + + def set_num_threads(self, num_threads): + set_func = getattr(self.dynlib, "MKL_Set_Num_Threads", lambda num_threads: None) + return set_func(num_threads) + + def get_version(self): + if not hasattr(self.dynlib, "MKL_Get_Version_String"): + return None + + res = ctypes.create_string_buffer(200) + self.dynlib.MKL_Get_Version_String(res, 200) + + version = res.value.decode("utf-8") + group = re.search(r"Version ([^ ]+) ", version) + if group is not None: + version = group.groups()[0] + return version.strip() + + def _get_threading_layer(self): + """Return the threading layer of MKL""" + # The function mkl_set_threading_layer returns the current threading + # layer. 
Calling it with an invalid threading layer allows us to safely + # get the threading layer + set_threading_layer = getattr( + self.dynlib, "MKL_Set_Threading_Layer", lambda layer: -1 + ) + layer_map = { + 0: "intel", + 1: "sequential", + 2: "pgi", + 3: "gnu", + 4: "tbb", + -1: "not specified", + } + return layer_map[set_threading_layer(-1)] + + +class OpenMPController(LibController): + """Controller class for OpenMP""" + + user_api = "openmp" + internal_api = "openmp" + filename_prefixes = ("libiomp", "libgomp", "libomp", "vcomp") + check_symbols = ( + "omp_get_max_threads", + "omp_get_num_threads", + ) + + def get_num_threads(self): + get_func = getattr(self.dynlib, "omp_get_max_threads", lambda: None) + return get_func() + + def set_num_threads(self, num_threads): + set_func = getattr(self.dynlib, "omp_set_num_threads", lambda num_threads: None) + return set_func(num_threads) + + def get_version(self): + # There is no way to get the version number programmatically in OpenMP. + return None + + +# Controllers for the libraries that we'll look for in the loaded libraries. +# Third party libraries can register their own controllers. 
+_ALL_CONTROLLERS = [ + OpenBLASController, + BLISController, + MKLController, + OpenMPController, + FlexiBLASController, +] + +# Helpers for the doc and test names +_ALL_USER_APIS = list(set(lib.user_api for lib in _ALL_CONTROLLERS)) +_ALL_INTERNAL_APIS = [lib.internal_api for lib in _ALL_CONTROLLERS] +_ALL_PREFIXES = list( + set(prefix for lib in _ALL_CONTROLLERS for prefix in lib.filename_prefixes) +) +_ALL_BLAS_LIBRARIES = [ + lib.internal_api for lib in _ALL_CONTROLLERS if lib.user_api == "blas" +] +_ALL_OPENMP_LIBRARIES = OpenMPController.filename_prefixes + + +def register(controller): + """Register a new controller""" + _ALL_CONTROLLERS.append(controller) + _ALL_USER_APIS.append(controller.user_api) + _ALL_INTERNAL_APIS.append(controller.internal_api) + _ALL_PREFIXES.extend(controller.filename_prefixes) + + +def _format_docstring(*args, **kwargs): + def decorator(o): + if o.__doc__ is not None: + o.__doc__ = o.__doc__.format(*args, **kwargs) + return o + + return decorator + + +@lru_cache(maxsize=10000) +def _realpath(filepath): + """Small caching wrapper around os.path.realpath to limit system calls""" + return os.path.realpath(filepath) + + +@_format_docstring(USER_APIS=list(_ALL_USER_APIS), INTERNAL_APIS=_ALL_INTERNAL_APIS) +def threadpool_info(): + """Return the maximal number of threads for each detected library. + + Return a list with all the supported libraries that have been found. Each + library is represented by a dict with the following information: + + - "user_api" : user API. Possible values are {USER_APIS}. + - "internal_api": internal API. Possible values are {INTERNAL_APIS}. + - "prefix" : filename prefix of the specific implementation. + - "filepath": path to the loaded library. + - "version": version of the library (if available). + - "num_threads": the current thread limit. + + In addition, each library may contain internal_api specific entries. 
+ """ + return ThreadpoolController().info() + + +class _ThreadpoolLimiter: + """The guts of ThreadpoolController.limit + + Refer to the docstring of ThreadpoolController.limit for more details. + + It will only act on the library controllers held by the provided `controller`. + Using the default constructor sets the limits right away such that it can be used as + a callable. Setting the limits can be delayed by using the `wrap` class method such + that it can be used as a decorator. + """ + + def __init__(self, controller, *, limits=None, user_api=None): + self._controller = controller + self._limits, self._user_api, self._prefixes = self._check_params( + limits, user_api + ) + self._original_info = self._controller.info() + self._set_threadpool_limits() + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.restore_original_limits() + + @classmethod + def wrap(cls, controller, *, limits=None, user_api=None): + """Return an instance of this class that can be used as a decorator""" + return _ThreadpoolLimiterDecorator( + controller=controller, limits=limits, user_api=user_api + ) + + def restore_original_limits(self): + """Set the limits back to their original values""" + for lib_controller, original_info in zip( + self._controller.lib_controllers, self._original_info + ): + lib_controller.set_num_threads(original_info["num_threads"]) + + # Alias of `restore_original_limits` for backward compatibility + unregister = restore_original_limits + + def get_original_num_threads(self): + """Original num_threads from before calling threadpool_limits + + Return a dict `{user_api: num_threads}`. 
+ """ + num_threads = {} + warning_apis = [] + + for user_api in self._user_api: + limits = [ + lib_info["num_threads"] + for lib_info in self._original_info + if lib_info["user_api"] == user_api + ] + limits = set(limits) + n_limits = len(limits) + + if n_limits == 1: + limit = limits.pop() + elif n_limits == 0: + limit = None + else: + limit = min(limits) + warning_apis.append(user_api) + + num_threads[user_api] = limit + + if warning_apis: + warnings.warn( + "Multiple value possible for following user apis: " + + ", ".join(warning_apis) + + ". Returning the minimum." + ) + + return num_threads + + def _check_params(self, limits, user_api): + """Suitable values for the _limits, _user_api and _prefixes attributes""" + + if isinstance(limits, str) and limits == "sequential_blas_under_openmp": + ( + limits, + user_api, + ) = self._controller._get_params_for_sequential_blas_under_openmp().values() + + if limits is None or isinstance(limits, int): + if user_api is None: + user_api = _ALL_USER_APIS + elif user_api in _ALL_USER_APIS: + user_api = [user_api] + else: + raise ValueError( + f"user_api must be either in {_ALL_USER_APIS} or None. Got " + f"{user_api} instead." + ) + + if limits is not None: + limits = {api: limits for api in user_api} + prefixes = [] + else: + if isinstance(limits, list): + # This should be a list of dicts of library info, for + # compatibility with the result from threadpool_info. + limits = { + lib_info["prefix"]: lib_info["num_threads"] for lib_info in limits + } + elif isinstance(limits, ThreadpoolController): + # To set the limits from the library controllers of a + # ThreadpoolController object. + limits = { + lib_controller.prefix: lib_controller.num_threads + for lib_controller in limits.lib_controllers + } + + if not isinstance(limits, dict): + raise TypeError( + "limits must either be an int, a list, a dict, or " + f"'sequential_blas_under_openmp'. 
Got {type(limits)} instead" + ) + + # With a dictionary, can set both specific limit for given + # libraries and global limit for user_api. Fetch each separately. + prefixes = [prefix for prefix in limits if prefix in _ALL_PREFIXES] + user_api = [api for api in limits if api in _ALL_USER_APIS] + + return limits, user_api, prefixes + + def _set_threadpool_limits(self): + """Change the maximal number of threads in selected thread pools. + + Return a list with all the supported libraries that have been found + matching `self._prefixes` and `self._user_api`. + """ + if self._limits is None: + return + + for lib_controller in self._controller.lib_controllers: + # self._limits is a dict {key: num_threads} where key is either + # a prefix or a user_api. If a library matches both, the limit + # corresponding to the prefix is chosen. + if lib_controller.prefix in self._limits: + num_threads = self._limits[lib_controller.prefix] + elif lib_controller.user_api in self._limits: + num_threads = self._limits[lib_controller.user_api] + else: + continue + + if num_threads is not None: + lib_controller.set_num_threads(num_threads) + + +class _ThreadpoolLimiterDecorator(_ThreadpoolLimiter, ContextDecorator): + """Same as _ThreadpoolLimiter but to be used as a decorator""" + + def __init__(self, controller, *, limits=None, user_api=None): + self._limits, self._user_api, self._prefixes = self._check_params( + limits, user_api + ) + self._controller = controller + + def __enter__(self): + # we need to set the limits here and not in the __init__ because we want the + # limits to be set when calling the decorated function, not when creating the + # decorator. 
+ self._original_info = self._controller.info() + self._set_threadpool_limits() + return self + + +@_format_docstring( + USER_APIS=", ".join(f'"{api}"' for api in _ALL_USER_APIS), + BLAS_LIBS=", ".join(_ALL_BLAS_LIBRARIES), + OPENMP_LIBS=", ".join(_ALL_OPENMP_LIBRARIES), +) +class threadpool_limits(_ThreadpoolLimiter): + """Change the maximal number of threads that can be used in thread pools. + + This object can be used either as a callable (the construction of this object + limits the number of threads), as a context manager in a `with` block to + automatically restore the original state of the controlled libraries when exiting + the block, or as a decorator through its `wrap` method. + + Set the maximal number of threads that can be used in thread pools used in + the supported libraries to `limit`. This function works for libraries that + are already loaded in the interpreter and can be changed dynamically. + + This effect is global and impacts the whole Python process. There is no thread level + isolation as these libraries do not offer thread-local APIs to configure the number + of threads to use in nested parallel calls. + + Parameters + ---------- + limits : int, dict, 'sequential_blas_under_openmp' or None (default=None) + The maximal number of threads that can be used in thread pools + + - If int, sets the maximum number of threads to `limits` for each + library selected by `user_api`. + + - If it is a dictionary `{{key: max_threads}}`, this function sets a + custom maximum number of threads for each `key` which can be either a + `user_api` or a `prefix` for a specific library. + + - If 'sequential_blas_under_openmp', it will chose the appropriate `limits` + and `user_api` parameters for the specific use case of sequential BLAS + calls within an OpenMP parallel region. The `user_api` parameter is + ignored. + + - If None, this function does not do anything. + + user_api : {USER_APIS} or None (default=None) + APIs of libraries to limit. 
Used only if `limits` is an int. + + - If "blas", it will only limit BLAS supported libraries ({BLAS_LIBS}). + + - If "openmp", it will only limit OpenMP supported libraries + ({OPENMP_LIBS}). Note that it can affect the number of threads used + by the BLAS libraries if they rely on OpenMP. + + - If None, this function will apply to all supported libraries. + """ + + def __init__(self, limits=None, user_api=None): + super().__init__(ThreadpoolController(), limits=limits, user_api=user_api) + + @classmethod + def wrap(cls, limits=None, user_api=None): + return super().wrap(ThreadpoolController(), limits=limits, user_api=user_api) + + +class ThreadpoolController: + """Collection of LibController objects for all loaded supported libraries + + Attributes + ---------- + lib_controllers : list of `LibController` objects + The list of library controllers of all loaded supported libraries. + """ + + # Cache for libc under POSIX and a few system libraries under Windows. + # We use a class level cache instead of an instance level cache because + # it's very unlikely that a shared library will be unloaded and reloaded + # during the lifetime of a program. + _system_libraries = dict() + + def __init__(self): + self.lib_controllers = [] + self._load_libraries() + self._warn_if_incompatible_openmp() + + @classmethod + def _from_controllers(cls, lib_controllers): + new_controller = cls.__new__(cls) + new_controller.lib_controllers = lib_controllers + return new_controller + + def info(self): + """Return lib_controllers info as a list of dicts""" + return [lib_controller.info() for lib_controller in self.lib_controllers] + + def select(self, **kwargs): + """Return a ThreadpoolController containing a subset of its current + library controllers + + It will select all libraries matching at least one pair (key, value) from kwargs + where key is an entry of the library info dict (like "user_api", "internal_api", + "prefix", ...) 
and value is the value or a list of acceptable values for that + entry. + + For instance, `ThreadpoolController().select(internal_api=["blis", "openblas"])` + will select all library controllers whose internal_api is either "blis" or + "openblas". + """ + for key, vals in kwargs.items(): + kwargs[key] = [vals] if not isinstance(vals, list) else vals + + lib_controllers = [ + lib_controller + for lib_controller in self.lib_controllers + if any( + getattr(lib_controller, key, None) in vals + for key, vals in kwargs.items() + ) + ] + + return ThreadpoolController._from_controllers(lib_controllers) + + def _get_params_for_sequential_blas_under_openmp(self): + """Return appropriate params to use for a sequential BLAS call in an OpenMP loop + + This function takes into account the unexpected behavior of OpenBLAS with the + OpenMP threading layer. + """ + if self.select( + internal_api="openblas", threading_layer="openmp" + ).lib_controllers: + return {"limits": None, "user_api": None} + return {"limits": 1, "user_api": "blas"} + + @_format_docstring( + USER_APIS=", ".join('"{}"'.format(api) for api in _ALL_USER_APIS), + BLAS_LIBS=", ".join(_ALL_BLAS_LIBRARIES), + OPENMP_LIBS=", ".join(_ALL_OPENMP_LIBRARIES), + ) + def limit(self, *, limits=None, user_api=None): + """Change the maximal number of threads that can be used in thread pools. + + This function returns an object that can be used either as a callable (the + construction of this object limits the number of threads) or as a context + manager, in a `with` block to automatically restore the original state of the + controlled libraries when exiting the block. + + Set the maximal number of threads that can be used in thread pools used in + the supported libraries to `limits`. This function works for libraries that + are already loaded in the interpreter and can be changed dynamically. + + This effect is global and impacts the whole Python process. 
There is no thread + level isolation as these libraries do not offer thread-local APIs to configure + the number of threads to use in nested parallel calls. + + Parameters + ---------- + limits : int, dict, 'sequential_blas_under_openmp' or None (default=None) + The maximal number of threads that can be used in thread pools + + - If int, sets the maximum number of threads to `limits` for each + library selected by `user_api`. + + - If it is a dictionary `{{key: max_threads}}`, this function sets a + custom maximum number of threads for each `key` which can be either a + `user_api` or a `prefix` for a specific library. + + - If 'sequential_blas_under_openmp', it will chose the appropriate `limits` + and `user_api` parameters for the specific use case of sequential BLAS + calls within an OpenMP parallel region. The `user_api` parameter is + ignored. + + - If None, this function does not do anything. + + user_api : {USER_APIS} or None (default=None) + APIs of libraries to limit. Used only if `limits` is an int. + + - If "blas", it will only limit BLAS supported libraries ({BLAS_LIBS}). + + - If "openmp", it will only limit OpenMP supported libraries + ({OPENMP_LIBS}). Note that it can affect the number of threads used + by the BLAS libraries if they rely on OpenMP. + + - If None, this function will apply to all supported libraries. + """ + return _ThreadpoolLimiter(self, limits=limits, user_api=user_api) + + @_format_docstring( + USER_APIS=", ".join('"{}"'.format(api) for api in _ALL_USER_APIS), + BLAS_LIBS=", ".join(_ALL_BLAS_LIBRARIES), + OPENMP_LIBS=", ".join(_ALL_OPENMP_LIBRARIES), + ) + def wrap(self, *, limits=None, user_api=None): + """Change the maximal number of threads that can be used in thread pools. + + This function returns an object that can be used as a decorator. + + Set the maximal number of threads that can be used in thread pools used in + the supported libraries to `limits`. 
This function works for libraries that + are already loaded in the interpreter and can be changed dynamically. + + Parameters + ---------- + limits : int, dict or None (default=None) + The maximal number of threads that can be used in thread pools + + - If int, sets the maximum number of threads to `limits` for each + library selected by `user_api`. + + - If it is a dictionary `{{key: max_threads}}`, this function sets a + custom maximum number of threads for each `key` which can be either a + `user_api` or a `prefix` for a specific library. + + - If None, this function does not do anything. + + user_api : {USER_APIS} or None (default=None) + APIs of libraries to limit. Used only if `limits` is an int. + + - If "blas", it will only limit BLAS supported libraries ({BLAS_LIBS}). + + - If "openmp", it will only limit OpenMP supported libraries + ({OPENMP_LIBS}). Note that it can affect the number of threads used + by the BLAS libraries if they rely on OpenMP. + + - If None, this function will apply to all supported libraries. + """ + return _ThreadpoolLimiter.wrap(self, limits=limits, user_api=user_api) + + def __len__(self): + return len(self.lib_controllers) + + def _load_libraries(self): + """Loop through loaded shared libraries and store the supported ones""" + if sys.platform == "darwin": + self._find_libraries_with_dyld() + elif sys.platform == "win32": + self._find_libraries_with_enum_process_module_ex() + elif "pyodide" in sys.modules: + self._find_libraries_pyodide() + else: + self._find_libraries_with_dl_iterate_phdr() + + def _find_libraries_with_dl_iterate_phdr(self): + """Loop through loaded libraries and return binders on supported ones + + This function is expected to work on POSIX system only. 
+ This code is adapted from code by Intel developer @anton-malakhov + available at https://github.com/IntelPython/smp + + Copyright (c) 2017, Intel Corporation published under the BSD 3-Clause + license + """ + libc = self._get_libc() + if not hasattr(libc, "dl_iterate_phdr"): # pragma: no cover + warnings.warn( + "Could not find dl_iterate_phdr in the C standard library.", + RuntimeWarning, + ) + return [] + + # Callback function for `dl_iterate_phdr` which is called for every + # library loaded in the current process until it returns 1. + def match_library_callback(info, size, data): + # Get the path of the current library + filepath = info.contents.dlpi_name + if filepath: + filepath = filepath.decode("utf-8") + + # Store the library controller if it is supported and selected + self._make_controller_from_path(filepath) + return 0 + + c_func_signature = ctypes.CFUNCTYPE( + ctypes.c_int, # Return type + ctypes.POINTER(_dl_phdr_info), + ctypes.c_size_t, + ctypes.c_char_p, + ) + c_match_library_callback = c_func_signature(match_library_callback) + + data = ctypes.c_char_p(b"") + libc.dl_iterate_phdr(c_match_library_callback, data) + + def _find_libraries_with_dyld(self): + """Loop through loaded libraries and return binders on supported ones + + This function is expected to work on OSX system only + """ + libc = self._get_libc() + if not hasattr(libc, "_dyld_image_count"): # pragma: no cover + warnings.warn( + "Could not find _dyld_image_count in the C standard library.", + RuntimeWarning, + ) + return [] + + n_dyld = libc._dyld_image_count() + libc._dyld_get_image_name.restype = ctypes.c_char_p + + for i in range(n_dyld): + filepath = ctypes.string_at(libc._dyld_get_image_name(i)) + filepath = filepath.decode("utf-8") + + # Store the library controller if it is supported and selected + self._make_controller_from_path(filepath) + + def _find_libraries_with_enum_process_module_ex(self): + """Loop through loaded libraries and return binders on supported ones + + This 
function is expected to work on windows system only. + This code is adapted from code by Philipp Hagemeister @phihag available + at https://stackoverflow.com/questions/17474574 + """ + from ctypes.wintypes import DWORD, HMODULE, MAX_PATH + + PROCESS_QUERY_INFORMATION = 0x0400 + PROCESS_VM_READ = 0x0010 + + LIST_LIBRARIES_ALL = 0x03 + + ps_api = self._get_windll("Psapi") + kernel_32 = self._get_windll("kernel32") + + h_process = kernel_32.OpenProcess( + PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, False, os.getpid() + ) + if not h_process: # pragma: no cover + raise OSError(f"Could not open PID {os.getpid()}") + + try: + buf_count = 256 + needed = DWORD() + # Grow the buffer until it becomes large enough to hold all the + # module headers + while True: + buf = (HMODULE * buf_count)() + buf_size = ctypes.sizeof(buf) + if not ps_api.EnumProcessModulesEx( + h_process, + ctypes.byref(buf), + buf_size, + ctypes.byref(needed), + LIST_LIBRARIES_ALL, + ): + raise OSError("EnumProcessModulesEx failed") + if buf_size >= needed.value: + break + buf_count = needed.value // (buf_size // buf_count) + + count = needed.value // (buf_size // buf_count) + h_modules = map(HMODULE, buf[:count]) + + # Loop through all the module headers and get the library path + buf = ctypes.create_unicode_buffer(MAX_PATH) + n_size = DWORD() + for h_module in h_modules: + # Get the path of the current module + if not ps_api.GetModuleFileNameExW( + h_process, h_module, ctypes.byref(buf), ctypes.byref(n_size) + ): + raise OSError("GetModuleFileNameEx failed") + filepath = buf.value + + # Store the library controller if it is supported and selected + self._make_controller_from_path(filepath) + finally: + kernel_32.CloseHandle(h_process) + + def _find_libraries_pyodide(self): + """Pyodide specific implementation for finding loaded libraries. + + Adapted from suggestion in https://github.com/joblib/threadpoolctl/pull/169#issuecomment-1946696449. + + One day, we may have a simpler solution. 
libc dl_iterate_phdr needs to + be implemented in Emscripten and exposed in Pyodide, see + https://github.com/emscripten-core/emscripten/issues/21354 for more + details. + """ + try: + from pyodide_js._module import LDSO + except ImportError: + warnings.warn( + "Unable to import LDSO from pyodide_js._module. This should never " + "happen." + ) + return + + for filepath in LDSO.loadedLibsByName.as_object_map(): + # Some libraries are duplicated by Pyodide and do not exist in the + # filesystem, so we first check for the existence of the file. For + # more details, see + # https://github.com/joblib/threadpoolctl/pull/169#issuecomment-1947946728 + if os.path.exists(filepath): + self._make_controller_from_path(filepath) + + def _make_controller_from_path(self, filepath): + """Store a library controller if it is supported and selected""" + # Required to resolve symlinks + filepath = _realpath(filepath) + # `lower` required to take account of OpenMP dll case on Windows + # (vcomp, VCOMP, Vcomp, ...) + filename = os.path.basename(filepath).lower() + + # Loop through supported libraries to find if this filename corresponds + # to a supported one. + for controller_class in _ALL_CONTROLLERS: + # check if filename matches a supported prefix + prefix = self._check_prefix(filename, controller_class.filename_prefixes) + + # filename does not match any of the prefixes of the candidate + # library. move to next library. + if prefix is None: + continue + + # workaround for BLAS libraries packaged by conda-forge on windows, which + # are all renamed "libblas.dll". We thus have to check to which BLAS + # implementation it actually corresponds looking for implementation + # specific symbols. 
+ if prefix == "libblas": + if filename.endswith(".dll"): + libblas = ctypes.CDLL(filepath, _RTLD_NOLOAD) + if not any( + hasattr(libblas, func) + for func in controller_class.check_symbols + ): + continue + else: + # We ignore libblas on other platforms than windows because there + # might be a libblas dso comming with openblas for instance that + # can't be used to instantiate a pertinent LibController (many + # symbols are missing) and would create confusion by making a + # duplicate entry in threadpool_info. + continue + + # filename matches a prefix. Now we check if the library has the symbols we + # are looking for. If none of the symbols exists, it's very likely not the + # expected library (e.g. a library having a common prefix with one of the + # our supported libraries). Otherwise, create and store the library + # controller. + lib_controller = controller_class( + filepath=filepath, prefix=prefix, parent=self + ) + + if filepath in (lib.filepath for lib in self.lib_controllers): + # We already have a controller for this library. + continue + + if not hasattr(controller_class, "check_symbols") or any( + hasattr(lib_controller.dynlib, func) + for func in controller_class.check_symbols + ): + self.lib_controllers.append(lib_controller) + + def _check_prefix(self, library_basename, filename_prefixes): + """Return the prefix library_basename starts with + + Return None if none matches. + """ + for prefix in filename_prefixes: + if library_basename.startswith(prefix): + return prefix + return None + + def _warn_if_incompatible_openmp(self): + """Raise a warning if llvm-OpenMP and intel-OpenMP are both loaded""" + prefixes = [lib_controller.prefix for lib_controller in self.lib_controllers] + msg = textwrap.dedent( + """ + Found Intel OpenMP ('libiomp') and LLVM OpenMP ('libomp') loaded at + the same time. Both libraries are known to be incompatible and this + can cause random crashes or deadlocks on Linux when loaded in the + same Python program. 
+ Using threadpoolctl may cause crashes or deadlocks. For more + information and possible workarounds, please see + https://github.com/joblib/threadpoolctl/blob/master/multiple_openmp.md + """ + ) + if "libomp" in prefixes and "libiomp" in prefixes: + warnings.warn(msg, RuntimeWarning) + + @classmethod + def _get_libc(cls): + """Load the lib-C for unix systems.""" + libc = cls._system_libraries.get("libc") + if libc is None: + # Remark: If libc is statically linked or if Python is linked against an + # alternative implementation of libc like musl, find_library will return + # None and CDLL will load the main program itself which should contain the + # libc symbols. We still name it libc for convenience. + # If the main program does not contain the libc symbols, it's ok because + # we check their presence later anyway. + libc = ctypes.CDLL(find_library("c"), mode=_RTLD_NOLOAD) + cls._system_libraries["libc"] = libc + return libc + + @classmethod + def _get_windll(cls, dll_name): + """Load a windows DLL""" + dll = cls._system_libraries.get(dll_name) + if dll is None: + dll = ctypes.WinDLL(f"{dll_name}.dll") + cls._system_libraries[dll_name] = dll + return dll + + +def _main(): + """Commandline interface to display thread-pool information and exit.""" + import argparse + import importlib + import json + import sys + + parser = argparse.ArgumentParser( + usage="python -m threadpoolctl -i numpy scipy.linalg xgboost", + description="Display thread-pool information and exit.", + ) + parser.add_argument( + "-i", + "--import", + dest="modules", + nargs="*", + default=(), + help="Python modules to import before introspecting thread-pools.", + ) + parser.add_argument( + "-c", + "--command", + help="a Python statement to execute before introspecting thread-pools.", + ) + + options = parser.parse_args(sys.argv[1:]) + for module in options.modules: + try: + importlib.import_module(module, package=None) + except ImportError: + print("WARNING: could not import", module, 
file=sys.stderr) + + if options.command: + exec(options.command) + + print(json.dumps(threadpool_info(), indent=2)) + + +if __name__ == "__main__": + _main() diff --git a/llmeval-env/lib/python3.10/site-packages/tqdm-4.66.4.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/tqdm-4.66.4.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tqdm-4.66.4.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/tqdm-4.66.4.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/tqdm-4.66.4.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..e81232ee16a4b592c7f11a5a970b92a5576729f8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tqdm-4.66.4.dist-info/METADATA @@ -0,0 +1,1590 @@ +Metadata-Version: 2.1 +Name: tqdm +Version: 4.66.4 +Summary: Fast, Extensible Progress Meter +Maintainer-email: tqdm developers +License: MPL-2.0 AND MIT +Project-URL: homepage, https://tqdm.github.io +Project-URL: repository, https://github.com/tqdm/tqdm +Project-URL: changelog, https://tqdm.github.io/releases +Project-URL: wiki, https://github.com/tqdm/tqdm/wiki +Keywords: progressbar,progressmeter,progress,bar,meter,rate,eta,console,terminal,time +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Environment :: MacOS X +Classifier: Environment :: Other Environment +Classifier: Environment :: Win32 (MS Windows) +Classifier: Environment :: X11 Applications +Classifier: Framework :: IPython +Classifier: Framework :: Jupyter +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: End Users/Desktop +Classifier: Intended Audience :: Other Audience +Classifier: Intended Audience :: System Administrators +Classifier: License :: OSI Approved :: 
MIT License +Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0) +Classifier: Operating System :: MacOS +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: Microsoft +Classifier: Operating System :: Microsoft :: MS-DOS +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX +Classifier: Operating System :: POSIX :: BSD +Classifier: Operating System :: POSIX :: BSD :: FreeBSD +Classifier: Operating System :: POSIX :: Linux +Classifier: Operating System :: POSIX :: SunOS/Solaris +Classifier: Operating System :: Unix +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation +Classifier: Programming Language :: Python :: Implementation :: IronPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Programming Language :: Unix Shell +Classifier: Topic :: Desktop Environment +Classifier: Topic :: Education :: Computer Aided Instruction (CAI) +Classifier: Topic :: Education :: Testing +Classifier: Topic :: Office/Business +Classifier: Topic :: Other/Nonlisted Topic +Classifier: Topic :: Software Development :: Build Tools +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Software Development :: Pre-processors +Classifier: Topic :: Software Development :: User Interfaces +Classifier: Topic :: System :: Installation/Setup +Classifier: Topic :: System :: Logging +Classifier: Topic :: System :: Monitoring +Classifier: Topic :: System :: Shells 
+Classifier: Topic :: Terminals +Classifier: Topic :: Utilities +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENCE +Requires-Dist: colorama ; platform_system == "Windows" +Provides-Extra: dev +Requires-Dist: pytest >=6 ; extra == 'dev' +Requires-Dist: pytest-cov ; extra == 'dev' +Requires-Dist: pytest-timeout ; extra == 'dev' +Requires-Dist: pytest-xdist ; extra == 'dev' +Provides-Extra: notebook +Requires-Dist: ipywidgets >=6 ; extra == 'notebook' +Provides-Extra: slack +Requires-Dist: slack-sdk ; extra == 'slack' +Provides-Extra: telegram +Requires-Dist: requests ; extra == 'telegram' + +|Logo| + +tqdm +==== + +|Py-Versions| |Versions| |Conda-Forge-Status| |Docker| |Snapcraft| + +|Build-Status| |Coverage-Status| |Branch-Coverage-Status| |Codacy-Grade| |Libraries-Rank| |PyPI-Downloads| + +|LICENCE| |OpenHub-Status| |binder-demo| |awesome-python| + +``tqdm`` derives from the Arabic word *taqaddum* (تقدّم) which can mean "progress," +and is an abbreviation for "I love you so much" in Spanish (*te quiero demasiado*). + +Instantly make your loops show a smart progress meter - just wrap any +iterable with ``tqdm(iterable)``, and you're done! + +.. code:: python + + from tqdm import tqdm + for i in tqdm(range(10000)): + ... + +``76%|████████████████████████        | 7568/10000 [00:33<00:10, 229.00it/s]`` + +``trange(N)`` can be also used as a convenient shortcut for +``tqdm(range(N))``. + +|Screenshot| + |Video| |Slides| |Merch| + +It can also be executed as a module with pipes: + +.. code:: sh + + $ seq 9999999 | tqdm --bytes | wc -l + 75.2MB [00:00, 217MB/s] + 9999999 + + $ tar -zcf - docs/ | tqdm --bytes --total `du -sb docs/ | cut -f1` \ + > backup.tgz + 32%|██████████▍ | 8.89G/27.9G [00:42<01:31, 223MB/s] + +Overhead is low -- about 60ns per iteration (80ns with ``tqdm.gui``), and is +unit tested against performance regression. +By comparison, the well-established +`ProgressBar `__ has +an 800ns/iter overhead. 
+ +In addition to its low overhead, ``tqdm`` uses smart algorithms to predict +the remaining time and to skip unnecessary iteration displays, which allows +for a negligible overhead in most cases. + +``tqdm`` works on any platform +(Linux, Windows, Mac, FreeBSD, NetBSD, Solaris/SunOS), +in any console or in a GUI, and is also friendly with IPython/Jupyter notebooks. + +``tqdm`` does not require any dependencies (not even ``curses``!), just +Python and an environment supporting ``carriage return \r`` and +``line feed \n`` control characters. + +------------------------------------------ + +.. contents:: Table of contents + :backlinks: top + :local: + + +Installation +------------ + +Latest PyPI stable release +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +|Versions| |PyPI-Downloads| |Libraries-Dependents| + +.. code:: sh + + pip install tqdm + +Latest development release on GitHub +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +|GitHub-Status| |GitHub-Stars| |GitHub-Commits| |GitHub-Forks| |GitHub-Updated| + +Pull and install pre-release ``devel`` branch: + +.. code:: sh + + pip install "git+https://github.com/tqdm/tqdm.git@devel#egg=tqdm" + +Latest Conda release +~~~~~~~~~~~~~~~~~~~~ + +|Conda-Forge-Status| + +.. code:: sh + + conda install -c conda-forge tqdm + +Latest Snapcraft release +~~~~~~~~~~~~~~~~~~~~~~~~ + +|Snapcraft| + +There are 3 channels to choose from: + +.. code:: sh + + snap install tqdm # implies --stable, i.e. latest tagged release + snap install tqdm --candidate # master branch + snap install tqdm --edge # devel branch + +Note that ``snap`` binaries are purely for CLI use (not ``import``-able), and +automatically set up ``bash`` tab-completion. + +Latest Docker release +~~~~~~~~~~~~~~~~~~~~~ + +|Docker| + +.. code:: sh + + docker pull tqdm/tqdm + docker run -i --rm tqdm/tqdm --help + +Other +~~~~~ + +There are other (unofficial) places where ``tqdm`` may be downloaded, particularly for CLI use: + +|Repology| + +.. 
|Repology| image:: https://repology.org/badge/tiny-repos/python:tqdm.svg + :target: https://repology.org/project/python:tqdm/versions + +Changelog +--------- + +The list of all changes is available either on GitHub's Releases: +|GitHub-Status|, on the +`wiki `__, or on the +`website `__. + + +Usage +----- + +``tqdm`` is very versatile and can be used in a number of ways. +The three main ones are given below. + +Iterable-based +~~~~~~~~~~~~~~ + +Wrap ``tqdm()`` around any iterable: + +.. code:: python + + from tqdm import tqdm + from time import sleep + + text = "" + for char in tqdm(["a", "b", "c", "d"]): + sleep(0.25) + text = text + char + +``trange(i)`` is a special optimised instance of ``tqdm(range(i))``: + +.. code:: python + + from tqdm import trange + + for i in trange(100): + sleep(0.01) + +Instantiation outside of the loop allows for manual control over ``tqdm()``: + +.. code:: python + + pbar = tqdm(["a", "b", "c", "d"]) + for char in pbar: + sleep(0.25) + pbar.set_description("Processing %s" % char) + +Manual +~~~~~~ + +Manual control of ``tqdm()`` updates using a ``with`` statement: + +.. code:: python + + with tqdm(total=100) as pbar: + for i in range(10): + sleep(0.1) + pbar.update(10) + +If the optional variable ``total`` (or an iterable with ``len()``) is +provided, predictive stats are displayed. + +``with`` is also optional (you can just assign ``tqdm()`` to a variable, +but in this case don't forget to ``del`` or ``close()`` at the end: + +.. code:: python + + pbar = tqdm(total=100) + for i in range(10): + sleep(0.1) + pbar.update(10) + pbar.close() + +Module +~~~~~~ + +Perhaps the most wonderful use of ``tqdm`` is in a script or on the command +line. Simply inserting ``tqdm`` (or ``python -m tqdm``) between pipes will pass +through all ``stdin`` to ``stdout`` while printing progress to ``stderr``. + +The example below demonstrate counting the number of lines in all Python files +in the current directory, with timing information included. + +.. 
code:: sh + + $ time find . -name '*.py' -type f -exec cat \{} \; | wc -l + 857365 + + real 0m3.458s + user 0m0.274s + sys 0m3.325s + + $ time find . -name '*.py' -type f -exec cat \{} \; | tqdm | wc -l + 857366it [00:03, 246471.31it/s] + 857365 + + real 0m3.585s + user 0m0.862s + sys 0m3.358s + +Note that the usual arguments for ``tqdm`` can also be specified. + +.. code:: sh + + $ find . -name '*.py' -type f -exec cat \{} \; | + tqdm --unit loc --unit_scale --total 857366 >> /dev/null + 100%|█████████████████████████████████| 857K/857K [00:04<00:00, 246Kloc/s] + +Backing up a large directory? + +.. code:: sh + + $ tar -zcf - docs/ | tqdm --bytes --total `du -sb docs/ | cut -f1` \ + > backup.tgz + 44%|██████████████▊ | 153M/352M [00:14<00:18, 11.0MB/s] + +This can be beautified further: + +.. code:: sh + + $ BYTES=$(du -sb docs/ | cut -f1) + $ tar -cf - docs/ \ + | tqdm --bytes --total "$BYTES" --desc Processing | gzip \ + | tqdm --bytes --total "$BYTES" --desc Compressed --position 1 \ + > ~/backup.tgz + Processing: 100%|██████████████████████| 352M/352M [00:14<00:00, 30.2MB/s] + Compressed: 42%|█████████▎ | 148M/352M [00:14<00:19, 10.9MB/s] + +Or done on a file level using 7-zip: + +.. code:: sh + + $ 7z a -bd -r backup.7z docs/ | grep Compressing \ + | tqdm --total $(find docs/ -type f | wc -l) --unit files \ + | grep -v Compressing + 100%|██████████████████████████▉| 15327/15327 [01:00<00:00, 712.96files/s] + +Pre-existing CLI programs already outputting basic progress information will +benefit from ``tqdm``'s ``--update`` and ``--update_to`` flags: + +.. code:: sh + + $ seq 3 0.1 5 | tqdm --total 5 --update_to --null + 100%|████████████████████████████████████| 5.0/5 [00:00<00:00, 9673.21it/s] + $ seq 10 | tqdm --update --null # 1 + 2 + ... 
+ 10 = 55 iterations + 55it [00:00, 90006.52it/s] + +FAQ and Known Issues +-------------------- + +|GitHub-Issues| + +The most common issues relate to excessive output on multiple lines, instead +of a neat one-line progress bar. + +- Consoles in general: require support for carriage return (``CR``, ``\r``). + + * Some cloud logging consoles which don't support ``\r`` properly + (`cloudwatch `__, + `K8s `__) may benefit from + ``export TQDM_POSITION=-1``. + +- Nested progress bars: + + * Consoles in general: require support for moving cursors up to the + previous line. For example, + `IDLE `__, + `ConEmu `__ and + `PyCharm `__ (also + `here `__, + `here `__, and + `here `__) + lack full support. + * Windows: additionally may require the Python module ``colorama`` + to ensure nested bars stay within their respective lines. + +- Unicode: + + * Environments which report that they support unicode will have solid smooth + progressbars. The fallback is an ``ascii``-only bar. + * Windows consoles often only partially support unicode and thus + `often require explicit ascii=True `__ + (also `here `__). This is due to + either normal-width unicode characters being incorrectly displayed as + "wide", or some unicode characters not rendering. + +- Wrapping generators: + + * Generator wrapper functions tend to hide the length of iterables. + ``tqdm`` does not. + * Replace ``tqdm(enumerate(...))`` with ``enumerate(tqdm(...))`` or + ``tqdm(enumerate(x), total=len(x), ...)``. + The same applies to ``numpy.ndenumerate``. + * Replace ``tqdm(zip(a, b))`` with ``zip(tqdm(a), b)`` or even + ``zip(tqdm(a), tqdm(b))``. + * The same applies to ``itertools``. + * Some useful convenience functions can be found under ``tqdm.contrib``. + +- `No intermediate output in docker-compose `__: + use ``docker-compose run`` instead of ``docker-compose up`` and ``tty: true``. + +- Overriding defaults via environment variables: + e.g. in CI/cloud jobs, ``export TQDM_MININTERVAL=5`` to avoid log spam. 
+ This override logic is handled by the ``tqdm.utils.envwrap`` decorator + (useful independent of ``tqdm``). + +If you come across any other difficulties, browse and file |GitHub-Issues|. + +Documentation +------------- + +|Py-Versions| |README-Hits| (Since 19 May 2016) + +.. code:: python + + class tqdm(): + """ + Decorate an iterable object, returning an iterator which acts exactly + like the original iterable, but prints a dynamically updating + progressbar every time a value is requested. + """ + + @envwrap("TQDM_") # override defaults via env vars + def __init__(self, iterable=None, desc=None, total=None, leave=True, + file=None, ncols=None, mininterval=0.1, + maxinterval=10.0, miniters=None, ascii=None, disable=False, + unit='it', unit_scale=False, dynamic_ncols=False, + smoothing=0.3, bar_format=None, initial=0, position=None, + postfix=None, unit_divisor=1000, write_bytes=False, + lock_args=None, nrows=None, colour=None, delay=0): + +Parameters +~~~~~~~~~~ + +* iterable : iterable, optional + Iterable to decorate with a progressbar. + Leave blank to manually manage the updates. +* desc : str, optional + Prefix for the progressbar. +* total : int or float, optional + The number of expected iterations. If unspecified, + len(iterable) is used if possible. If float("inf") or as a last + resort, only basic progress statistics are displayed + (no ETA, no progressbar). + If ``gui`` is True and this parameter needs subsequent updating, + specify an initial arbitrary large positive number, + e.g. 9e9. +* leave : bool, optional + If [default: True], keeps all traces of the progressbar + upon termination of iteration. + If ``None``, will leave only if ``position`` is ``0``. +* file : ``io.TextIOWrapper`` or ``io.StringIO``, optional + Specifies where to output the progress messages + (default: sys.stderr). Uses ``file.write(str)`` and ``file.flush()`` + methods. For encoding, see ``write_bytes``. +* ncols : int, optional + The width of the entire output message. 
If specified, + dynamically resizes the progressbar to stay within this bound. + If unspecified, attempts to use environment width. The + fallback is a meter width of 10 and no limit for the counter and + statistics. If 0, will not print any meter (only stats). +* mininterval : float, optional + Minimum progress display update interval [default: 0.1] seconds. +* maxinterval : float, optional + Maximum progress display update interval [default: 10] seconds. + Automatically adjusts ``miniters`` to correspond to ``mininterval`` + after long display update lag. Only works if ``dynamic_miniters`` + or monitor thread is enabled. +* miniters : int or float, optional + Minimum progress display update interval, in iterations. + If 0 and ``dynamic_miniters``, will automatically adjust to equal + ``mininterval`` (more CPU efficient, good for tight loops). + If > 0, will skip display of specified number of iterations. + Tweak this and ``mininterval`` to get very efficient loops. + If your progress is erratic with both fast and slow iterations + (network, skipping items, etc) you should set miniters=1. +* ascii : bool or str, optional + If unspecified or False, use unicode (smooth blocks) to fill + the meter. The fallback is to use ASCII characters " 123456789#". +* disable : bool, optional + Whether to disable the entire progressbar wrapper + [default: False]. If set to None, disable on non-TTY. +* unit : str, optional + String that will be used to define the unit of each iteration + [default: it]. +* unit_scale : bool or int or float, optional + If 1 or True, the number of iterations will be reduced/scaled + automatically and a metric prefix following the + International System of Units standard will be added + (kilo, mega, etc.) [default: False]. If any other non-zero + number, will scale ``total`` and ``n``. +* dynamic_ncols : bool, optional + If set, constantly alters ``ncols`` and ``nrows`` to the + environment (allowing for window resizes) [default: False]. 
+* smoothing : float, optional + Exponential moving average smoothing factor for speed estimates + (ignored in GUI mode). Ranges from 0 (average speed) to 1 + (current/instantaneous speed) [default: 0.3]. +* bar_format : str, optional + Specify a custom bar string formatting. May impact performance. + [default: '{l_bar}{bar}{r_bar}'], where + l_bar='{desc}: {percentage:3.0f}%|' and + r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, ' + '{rate_fmt}{postfix}]' + Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt, + percentage, elapsed, elapsed_s, ncols, nrows, desc, unit, + rate, rate_fmt, rate_noinv, rate_noinv_fmt, + rate_inv, rate_inv_fmt, postfix, unit_divisor, + remaining, remaining_s, eta. + Note that a trailing ": " is automatically removed after {desc} + if the latter is empty. +* initial : int or float, optional + The initial counter value. Useful when restarting a progress + bar [default: 0]. If using float, consider specifying ``{n:.3f}`` + or similar in ``bar_format``, or specifying ``unit_scale``. +* position : int, optional + Specify the line offset to print this bar (starting from 0) + Automatic if unspecified. + Useful to manage multiple bars at once (eg, from threads). +* postfix : dict or ``*``, optional + Specify additional stats to display at the end of the bar. + Calls ``set_postfix(**postfix)`` if possible (dict). +* unit_divisor : float, optional + [default: 1000], ignored unless ``unit_scale`` is True. +* write_bytes : bool, optional + Whether to write bytes. If (default: False) will write unicode. +* lock_args : tuple, optional + Passed to ``refresh`` for intermediate output + (initialisation, iterating, and updating). +* nrows : int, optional + The screen height. If specified, hides nested bars outside this + bound. If unspecified, attempts to use environment height. + The fallback is 20. +* colour : str, optional + Bar colour (e.g. 'green', '#00ff00'). 
+* delay : float, optional + Don't display until [default: 0] seconds have elapsed. + +Extra CLI Options +~~~~~~~~~~~~~~~~~ + +* delim : chr, optional + Delimiting character [default: '\n']. Use '\0' for null. + N.B.: on Windows systems, Python converts '\n' to '\r\n'. +* buf_size : int, optional + String buffer size in bytes [default: 256] + used when ``delim`` is specified. +* bytes : bool, optional + If true, will count bytes, ignore ``delim``, and default + ``unit_scale`` to True, ``unit_divisor`` to 1024, and ``unit`` to 'B'. +* tee : bool, optional + If true, passes ``stdin`` to both ``stderr`` and ``stdout``. +* update : bool, optional + If true, will treat input as newly elapsed iterations, + i.e. numbers to pass to ``update()``. Note that this is slow + (~2e5 it/s) since every input must be decoded as a number. +* update_to : bool, optional + If true, will treat input as total elapsed iterations, + i.e. numbers to assign to ``self.n``. Note that this is slow + (~2e5 it/s) since every input must be decoded as a number. +* null : bool, optional + If true, will discard input (no stdout). +* manpath : str, optional + Directory in which to install tqdm man pages. +* comppath : str, optional + Directory in which to place tqdm completion. +* log : str, optional + CRITICAL|FATAL|ERROR|WARN(ING)|[default: 'INFO']|DEBUG|NOTSET. + +Returns +~~~~~~~ + +* out : decorated iterator. + +.. code:: python + + class tqdm(): + def update(self, n=1): + """ + Manually update the progress bar, useful for streams + such as reading files. + E.g.: + >>> t = tqdm(total=filesize) # Initialise + >>> for current_buffer in stream: + ... ... + ... t.update(len(current_buffer)) + >>> t.close() + The last line is highly recommended, but possibly not necessary if + ``t.update()`` will be called in such a way that ``filesize`` will be + exactly reached and printed. 
+ + Parameters + ---------- + n : int or float, optional + Increment to add to the internal counter of iterations + [default: 1]. If using float, consider specifying ``{n:.3f}`` + or similar in ``bar_format``, or specifying ``unit_scale``. + + Returns + ------- + out : bool or None + True if a ``display()`` was triggered. + """ + + def close(self): + """Cleanup and (if leave=False) close the progressbar.""" + + def clear(self, nomove=False): + """Clear current bar display.""" + + def refresh(self): + """ + Force refresh the display of this bar. + + Parameters + ---------- + nolock : bool, optional + If ``True``, does not lock. + If [default: ``False``]: calls ``acquire()`` on internal lock. + lock_args : tuple, optional + Passed to internal lock's ``acquire()``. + If specified, will only ``display()`` if ``acquire()`` returns ``True``. + """ + + def unpause(self): + """Restart tqdm timer from last print time.""" + + def reset(self, total=None): + """ + Resets to 0 iterations for repeated use. + + Consider combining with ``leave=True``. + + Parameters + ---------- + total : int or float, optional. Total to use for the new bar. + """ + + def set_description(self, desc=None, refresh=True): + """ + Set/modify description of the progress bar. + + Parameters + ---------- + desc : str, optional + refresh : bool, optional + Forces refresh [default: True]. + """ + + def set_postfix(self, ordered_dict=None, refresh=True, **tqdm_kwargs): + """ + Set/modify postfix (additional stats) + with automatic formatting based on datatype. + + Parameters + ---------- + ordered_dict : dict or OrderedDict, optional + refresh : bool, optional + Forces refresh [default: True]. 
+ kwargs : dict, optional + """ + + @classmethod + def write(cls, s, file=sys.stdout, end="\n"): + """Print a message via tqdm (without overlap with bars).""" + + @property + def format_dict(self): + """Public API for read-only member access.""" + + def display(self, msg=None, pos=None): + """ + Use ``self.sp`` to display ``msg`` in the specified ``pos``. + + Consider overloading this function when inheriting to use e.g.: + ``self.some_frontend(**self.format_dict)`` instead of ``self.sp``. + + Parameters + ---------- + msg : str, optional. What to display (default: ``repr(self)``). + pos : int, optional. Position to ``moveto`` + (default: ``abs(self.pos)``). + """ + + @classmethod + @contextmanager + def wrapattr(cls, stream, method, total=None, bytes=True, **tqdm_kwargs): + """ + stream : file-like object. + method : str, "read" or "write". The result of ``read()`` and + the first argument of ``write()`` should have a ``len()``. + + >>> with tqdm.wrapattr(file_obj, "read", total=file_obj.size) as fobj: + ... while True: + ... chunk = fobj.read(chunk_size) + ... if not chunk: + ... break + """ + + @classmethod + def pandas(cls, *targs, **tqdm_kwargs): + """Registers the current `tqdm` class with `pandas`.""" + + def trange(*args, **tqdm_kwargs): + """Shortcut for `tqdm(range(*args), **tqdm_kwargs)`.""" + +Convenience Functions +~~~~~~~~~~~~~~~~~~~~~ + +.. code:: python + + def tqdm.contrib.tenumerate(iterable, start=0, total=None, + tqdm_class=tqdm.auto.tqdm, **tqdm_kwargs): + """Equivalent of `numpy.ndenumerate` or builtin `enumerate`.""" + + def tqdm.contrib.tzip(iter1, *iter2plus, **tqdm_kwargs): + """Equivalent of builtin `zip`.""" + + def tqdm.contrib.tmap(function, *sequences, **tqdm_kwargs): + """Equivalent of builtin `map`.""" + +Submodules +~~~~~~~~~~ + +.. 
code:: python + + class tqdm.notebook.tqdm(tqdm.tqdm): + """IPython/Jupyter Notebook widget.""" + + class tqdm.auto.tqdm(tqdm.tqdm): + """Automatically chooses beween `tqdm.notebook` and `tqdm.tqdm`.""" + + class tqdm.asyncio.tqdm(tqdm.tqdm): + """Asynchronous version.""" + @classmethod + def as_completed(cls, fs, *, loop=None, timeout=None, total=None, + **tqdm_kwargs): + """Wrapper for `asyncio.as_completed`.""" + + class tqdm.gui.tqdm(tqdm.tqdm): + """Matplotlib GUI version.""" + + class tqdm.tk.tqdm(tqdm.tqdm): + """Tkinter GUI version.""" + + class tqdm.rich.tqdm(tqdm.tqdm): + """`rich.progress` version.""" + + class tqdm.keras.TqdmCallback(keras.callbacks.Callback): + """Keras callback for epoch and batch progress.""" + + class tqdm.dask.TqdmCallback(dask.callbacks.Callback): + """Dask callback for task progress.""" + + +``contrib`` ++++++++++++ + +The ``tqdm.contrib`` package also contains experimental modules: + +- ``tqdm.contrib.itertools``: Thin wrappers around ``itertools`` +- ``tqdm.contrib.concurrent``: Thin wrappers around ``concurrent.futures`` +- ``tqdm.contrib.slack``: Posts to `Slack `__ bots +- ``tqdm.contrib.discord``: Posts to `Discord `__ bots +- ``tqdm.contrib.telegram``: Posts to `Telegram `__ bots +- ``tqdm.contrib.bells``: Automagically enables all optional features + + * ``auto``, ``pandas``, ``slack``, ``discord``, ``telegram`` + +Examples and Advanced Usage +--------------------------- + +- See the `examples `__ + folder; +- import the module and run ``help()``; +- consult the `wiki `__; + + * this has an + `excellent article `__ + on how to make a **great** progressbar; + +- check out the `slides from PyData London `__, or +- run the |binder-demo|. + +Description and additional stats +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Custom information can be displayed and updated dynamically on ``tqdm`` bars +with the ``desc`` and ``postfix`` arguments: + +.. 
code:: python + + from tqdm import tqdm, trange + from random import random, randint + from time import sleep + + with trange(10) as t: + for i in t: + # Description will be displayed on the left + t.set_description('GEN %i' % i) + # Postfix will be displayed on the right, + # formatted automatically based on argument's datatype + t.set_postfix(loss=random(), gen=randint(1,999), str='h', + lst=[1, 2]) + sleep(0.1) + + with tqdm(total=10, bar_format="{postfix[0]} {postfix[1][value]:>8.2g}", + postfix=["Batch", {"value": 0}]) as t: + for i in range(10): + sleep(0.1) + t.postfix[1]["value"] = i / 2 + t.update() + +Points to remember when using ``{postfix[...]}`` in the ``bar_format`` string: + +- ``postfix`` also needs to be passed as an initial argument in a compatible + format, and +- ``postfix`` will be auto-converted to a string if it is a ``dict``-like + object. To prevent this behaviour, insert an extra item into the dictionary + where the key is not a string. + +Additional ``bar_format`` parameters may also be defined by overriding +``format_dict``, and the bar itself may be modified using ``ascii``: + +.. code:: python + + from tqdm import tqdm + class TqdmExtraFormat(tqdm): + """Provides a `total_time` format parameter""" + @property + def format_dict(self): + d = super().format_dict + total_time = d["elapsed"] * (d["total"] or 0) / max(d["n"], 1) + d.update(total_time=self.format_interval(total_time) + " in total") + return d + + for i in TqdmExtraFormat( + range(9), ascii=" .oO0", + bar_format="{total_time}: {percentage:.0f}%|{bar}{r_bar}"): + if i == 4: + break + +.. code:: + + 00:00 in total: 44%|0000. | 4/9 [00:00<00:00, 962.93it/s] + +Note that ``{bar}`` also supports a format specifier ``[width][type]``. 
+ +- ``width`` + + * unspecified (default): automatic to fill ``ncols`` + * ``int >= 0``: fixed width overriding ``ncols`` logic + * ``int < 0``: subtract from the automatic default + +- ``type`` + + * ``a``: ascii (``ascii=True`` override) + * ``u``: unicode (``ascii=False`` override) + * ``b``: blank (``ascii=" "`` override) + +This means a fixed bar with right-justified text may be created by using: +``bar_format="{l_bar}{bar:10}|{bar:-10b}right-justified"`` + +Nested progress bars +~~~~~~~~~~~~~~~~~~~~ + +``tqdm`` supports nested progress bars. Here's an example: + +.. code:: python + + from tqdm.auto import trange + from time import sleep + + for i in trange(4, desc='1st loop'): + for j in trange(5, desc='2nd loop'): + for k in trange(50, desc='3rd loop', leave=False): + sleep(0.01) + +For manual control over positioning (e.g. for multi-processing use), +you may specify ``position=n`` where ``n=0`` for the outermost bar, +``n=1`` for the next, and so on. +However, it's best to check if ``tqdm`` can work without manual ``position`` +first. + +.. code:: python + + from time import sleep + from tqdm import trange, tqdm + from multiprocessing import Pool, RLock, freeze_support + + L = list(range(9)) + + def progresser(n): + interval = 0.001 / (n + 2) + total = 5000 + text = f"#{n}, est. {interval * total:<04.2}s" + for _ in trange(total, desc=text, position=n): + sleep(interval) + + if __name__ == '__main__': + freeze_support() # for Windows support + tqdm.set_lock(RLock()) # for managing output contention + p = Pool(initializer=tqdm.set_lock, initargs=(tqdm.get_lock(),)) + p.map(progresser, L) + +Note that in Python 3, ``tqdm.write`` is thread-safe: + +.. code:: python + + from time import sleep + from tqdm import tqdm, trange + from concurrent.futures import ThreadPoolExecutor + + L = list(range(9)) + + def progresser(n): + interval = 0.001 / (n + 2) + total = 5000 + text = f"#{n}, est. 
{interval * total:<04.2}s" + for _ in trange(total, desc=text): + sleep(interval) + if n == 6: + tqdm.write("n == 6 completed.") + tqdm.write("`tqdm.write()` is thread-safe in py3!") + + if __name__ == '__main__': + with ThreadPoolExecutor() as p: + p.map(progresser, L) + +Hooks and callbacks +~~~~~~~~~~~~~~~~~~~ + +``tqdm`` can easily support callbacks/hooks and manual updates. +Here's an example with ``urllib``: + +**``urllib.urlretrieve`` documentation** + + | [...] + | If present, the hook function will be called once + | on establishment of the network connection and once after each block read + | thereafter. The hook will be passed three arguments; a count of blocks + | transferred so far, a block size in bytes, and the total size of the file. + | [...] + +.. code:: python + + import urllib, os + from tqdm import tqdm + urllib = getattr(urllib, 'request', urllib) + + class TqdmUpTo(tqdm): + """Provides `update_to(n)` which uses `tqdm.update(delta_n)`.""" + def update_to(self, b=1, bsize=1, tsize=None): + """ + b : int, optional + Number of blocks transferred so far [default: 1]. + bsize : int, optional + Size of each block (in tqdm units) [default: 1]. + tsize : int, optional + Total size (in tqdm units). If [default: None] remains unchanged. + """ + if tsize is not None: + self.total = tsize + return self.update(b * bsize - self.n) # also sets self.n = b * bsize + + eg_link = "https://caspersci.uk.to/matryoshka.zip" + with TqdmUpTo(unit='B', unit_scale=True, unit_divisor=1024, miniters=1, + desc=eg_link.split('/')[-1]) as t: # all optional kwargs + urllib.urlretrieve(eg_link, filename=os.devnull, + reporthook=t.update_to, data=None) + t.total = t.n + +Inspired by `twine#242 `__. +Functional alternative in +`examples/tqdm_wget.py `__. + +It is recommend to use ``miniters=1`` whenever there is potentially +large differences in iteration speed (e.g. downloading a file over +a patchy connection). 
+ +**Wrapping read/write methods** + +To measure throughput through a file-like object's ``read`` or ``write`` +methods, use ``CallbackIOWrapper``: + +.. code:: python + + from tqdm.auto import tqdm + from tqdm.utils import CallbackIOWrapper + + with tqdm(total=file_obj.size, + unit='B', unit_scale=True, unit_divisor=1024) as t: + fobj = CallbackIOWrapper(t.update, file_obj, "read") + while True: + chunk = fobj.read(chunk_size) + if not chunk: + break + t.reset() + # ... continue to use `t` for something else + +Alternatively, use the even simpler ``wrapattr`` convenience function, +which would condense both the ``urllib`` and ``CallbackIOWrapper`` examples +down to: + +.. code:: python + + import urllib, os + from tqdm import tqdm + + eg_link = "https://caspersci.uk.to/matryoshka.zip" + response = getattr(urllib, 'request', urllib).urlopen(eg_link) + with tqdm.wrapattr(open(os.devnull, "wb"), "write", + miniters=1, desc=eg_link.split('/')[-1], + total=getattr(response, 'length', None)) as fout: + for chunk in response: + fout.write(chunk) + +The ``requests`` equivalent is nearly identical: + +.. code:: python + + import requests, os + from tqdm import tqdm + + eg_link = "https://caspersci.uk.to/matryoshka.zip" + response = requests.get(eg_link, stream=True) + with tqdm.wrapattr(open(os.devnull, "wb"), "write", + miniters=1, desc=eg_link.split('/')[-1], + total=int(response.headers.get('content-length', 0))) as fout: + for chunk in response.iter_content(chunk_size=4096): + fout.write(chunk) + +**Custom callback** + +``tqdm`` is known for intelligently skipping unnecessary displays. To make a +custom callback take advantage of this, simply use the return value of +``update()``. This is set to ``True`` if a ``display()`` was triggered. + +.. code:: python + + from tqdm.auto import tqdm as std_tqdm + + def external_callback(*args, **kwargs): + ... 
+ + class TqdmExt(std_tqdm): + def update(self, n=1): + displayed = super().update(n) + if displayed: + external_callback(**self.format_dict) + return displayed + +``asyncio`` +~~~~~~~~~~~ + +Note that ``break`` isn't currently caught by asynchronous iterators. +This means that ``tqdm`` cannot clean up after itself in this case: + +.. code:: python + + from tqdm.asyncio import tqdm + + async for i in tqdm(range(9)): + if i == 2: + break + +Instead, either call ``pbar.close()`` manually or use the context manager syntax: + +.. code:: python + + from tqdm.asyncio import tqdm + + with tqdm(range(9)) as pbar: + async for i in pbar: + if i == 2: + break + +Pandas Integration +~~~~~~~~~~~~~~~~~~ + +Due to popular demand we've added support for ``pandas`` -- here's an example +for ``DataFrame.progress_apply`` and ``DataFrameGroupBy.progress_apply``: + +.. code:: python + + import pandas as pd + import numpy as np + from tqdm import tqdm + + df = pd.DataFrame(np.random.randint(0, 100, (100000, 6))) + + # Register `pandas.progress_apply` and `pandas.Series.map_apply` with `tqdm` + # (can use `tqdm.gui.tqdm`, `tqdm.notebook.tqdm`, optional kwargs, etc.) + tqdm.pandas(desc="my bar!") + + # Now you can use `progress_apply` instead of `apply` + # and `progress_map` instead of `map` + df.progress_apply(lambda x: x**2) + # can also groupby: + # df.groupby(0).progress_apply(lambda x: x**2) + +In case you're interested in how this works (and how to modify it for your +own callbacks), see the +`examples `__ +folder or import the module and run ``help()``. + +Keras Integration +~~~~~~~~~~~~~~~~~ + +A ``keras`` callback is also available: + +.. code:: python + + from tqdm.keras import TqdmCallback + + ... + + model.fit(..., verbose=0, callbacks=[TqdmCallback()]) + +Dask Integration +~~~~~~~~~~~~~~~~ + +A ``dask`` callback is also available: + +.. code:: python + + from tqdm.dask import TqdmCallback + + with TqdmCallback(desc="compute"): + ... 
+ arr.compute() + + # or use callback globally + cb = TqdmCallback(desc="global") + cb.register() + arr.compute() + +IPython/Jupyter Integration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +IPython/Jupyter is supported via the ``tqdm.notebook`` submodule: + +.. code:: python + + from tqdm.notebook import trange, tqdm + from time import sleep + + for i in trange(3, desc='1st loop'): + for j in tqdm(range(100), desc='2nd loop'): + sleep(0.01) + +In addition to ``tqdm`` features, the submodule provides a native Jupyter +widget (compatible with IPython v1-v4 and Jupyter), fully working nested bars +and colour hints (blue: normal, green: completed, red: error/interrupt, +light blue: no ETA); as demonstrated below. + +|Screenshot-Jupyter1| +|Screenshot-Jupyter2| +|Screenshot-Jupyter3| + +The ``notebook`` version supports percentage or pixels for overall width +(e.g.: ``ncols='100%'`` or ``ncols='480px'``). + +It is also possible to let ``tqdm`` automatically choose between +console or notebook versions by using the ``autonotebook`` submodule: + +.. code:: python + + from tqdm.autonotebook import tqdm + tqdm.pandas() + +Note that this will issue a ``TqdmExperimentalWarning`` if run in a notebook +since it is not meant to be possible to distinguish between ``jupyter notebook`` +and ``jupyter console``. Use ``auto`` instead of ``autonotebook`` to suppress +this warning. + +Note that notebooks will display the bar in the cell where it was created. +This may be a different cell from the one where it is used. +If this is not desired, either + +- delay the creation of the bar to the cell where it must be displayed, or +- create the bar with ``display=False``, and in a later cell call + ``display(bar.container)``: + +.. code:: python + + from tqdm.notebook import tqdm + pbar = tqdm(..., display=False) + +.. code:: python + + # different cell + display(pbar.container) + +The ``keras`` callback has a ``display()`` method which can be used likewise: + +.. 
code:: python + + from tqdm.keras import TqdmCallback + cbk = TqdmCallback(display=False) + +.. code:: python + + # different cell + cbk.display() + model.fit(..., verbose=0, callbacks=[cbk]) + +Another possibility is to have a single bar (near the top of the notebook) +which is constantly re-used (using ``reset()`` rather than ``close()``). +For this reason, the notebook version (unlike the CLI version) does not +automatically call ``close()`` upon ``Exception``. + +.. code:: python + + from tqdm.notebook import tqdm + pbar = tqdm() + +.. code:: python + + # different cell + iterable = range(100) + pbar.reset(total=len(iterable)) # initialise with new `total` + for i in iterable: + pbar.update() + pbar.refresh() # force print final status but don't `close()` + +Custom Integration +~~~~~~~~~~~~~~~~~~ + +To change the default arguments (such as making ``dynamic_ncols=True``), +simply use built-in Python magic: + +.. code:: python + + from functools import partial + from tqdm import tqdm as std_tqdm + tqdm = partial(std_tqdm, dynamic_ncols=True) + +For further customisation, +``tqdm`` may be inherited from to create custom callbacks (as with the +``TqdmUpTo`` example `above <#hooks-and-callbacks>`__) or for custom frontends +(e.g. GUIs such as notebook or plotting packages). In the latter case: + +1. ``def __init__()`` to call ``super().__init__(..., gui=True)`` to disable + terminal ``status_printer`` creation. +2. Redefine: ``close()``, ``clear()``, ``display()``. + +Consider overloading ``display()`` to use e.g. +``self.frontend(**self.format_dict)`` instead of ``self.sp(repr(self))``. + +Some submodule examples of inheritance: + +- `tqdm/notebook.py `__ +- `tqdm/gui.py `__ +- `tqdm/tk.py `__ +- `tqdm/contrib/slack.py `__ +- `tqdm/contrib/discord.py `__ +- `tqdm/contrib/telegram.py `__ + +Dynamic Monitor/Meter +~~~~~~~~~~~~~~~~~~~~~ + +You can use a ``tqdm`` as a meter which is not monotonically increasing. +This could be because ``n`` decreases (e.g. 
a CPU usage monitor) or ``total`` +changes. + +One example would be recursively searching for files. The ``total`` is the +number of objects found so far, while ``n`` is the number of those objects which +are files (rather than folders): + +.. code:: python + + from tqdm import tqdm + import os.path + + def find_files_recursively(path, show_progress=True): + files = [] + # total=1 assumes `path` is a file + t = tqdm(total=1, unit="file", disable=not show_progress) + if not os.path.exists(path): + raise IOError("Cannot find:" + path) + + def append_found_file(f): + files.append(f) + t.update() + + def list_found_dir(path): + """returns os.listdir(path) assuming os.path.isdir(path)""" + listing = os.listdir(path) + # subtract 1 since a "file" we found was actually this directory + t.total += len(listing) - 1 + # fancy way to give info without forcing a refresh + t.set_postfix(dir=path[-10:], refresh=False) + t.update(0) # may trigger a refresh + return listing + + def recursively_search(path): + if os.path.isdir(path): + for f in list_found_dir(path): + recursively_search(os.path.join(path, f)) + else: + append_found_file(path) + + recursively_search(path) + t.set_postfix(dir=path) + t.close() + return files + +Using ``update(0)`` is a handy way to let ``tqdm`` decide when to trigger a +display refresh to avoid console spamming. + +Writing messages +~~~~~~~~~~~~~~~~ + +This is a work in progress (see +`#737 `__). + +Since ``tqdm`` uses a simple printing mechanism to display progress bars, +you should not write any message in the terminal using ``print()`` while +a progressbar is open. + +To write messages in the terminal without any collision with ``tqdm`` bar +display, a ``.write()`` method is provided: + +.. 
code:: python + + from tqdm.auto import tqdm, trange + from time import sleep + + bar = trange(10) + for i in bar: + # Print using tqdm class method .write() + sleep(0.1) + if not (i % 3): + tqdm.write("Done task %i" % i) + # Can also use bar.write() + +By default, this will print to standard output ``sys.stdout``. but you can +specify any file-like object using the ``file`` argument. For example, this +can be used to redirect the messages writing to a log file or class. + +Redirecting writing +~~~~~~~~~~~~~~~~~~~ + +If using a library that can print messages to the console, editing the library +by replacing ``print()`` with ``tqdm.write()`` may not be desirable. +In that case, redirecting ``sys.stdout`` to ``tqdm.write()`` is an option. + +To redirect ``sys.stdout``, create a file-like class that will write +any input string to ``tqdm.write()``, and supply the arguments +``file=sys.stdout, dynamic_ncols=True``. + +A reusable canonical example is given below: + +.. code:: python + + from time import sleep + import contextlib + import sys + from tqdm import tqdm + from tqdm.contrib import DummyTqdmFile + + + @contextlib.contextmanager + def std_out_err_redirect_tqdm(): + orig_out_err = sys.stdout, sys.stderr + try: + sys.stdout, sys.stderr = map(DummyTqdmFile, orig_out_err) + yield orig_out_err[0] + # Relay exceptions + except Exception as exc: + raise exc + # Always restore sys.stdout/err if necessary + finally: + sys.stdout, sys.stderr = orig_out_err + + def some_fun(i): + print("Fee, fi, fo,".split()[i]) + + # Redirect stdout to tqdm.write() (don't forget the `as save_stdout`) + with std_out_err_redirect_tqdm() as orig_stdout: + # tqdm needs the original stdout + # and dynamic_ncols=True to autodetect console width + for i in tqdm(range(3), file=orig_stdout, dynamic_ncols=True): + sleep(.5) + some_fun(i) + + # After the `with`, printing is restored + print("Done!") + +Redirecting ``logging`` +~~~~~~~~~~~~~~~~~~~~~~~ + +Similar to ``sys.stdout``/``sys.stderr`` as 
detailed above, console ``logging`` +may also be redirected to ``tqdm.write()``. + +Warning: if also redirecting ``sys.stdout``/``sys.stderr``, make sure to +redirect ``logging`` first if needed. + +Helper methods are available in ``tqdm.contrib.logging``. For example: + +.. code:: python + + import logging + from tqdm import trange + from tqdm.contrib.logging import logging_redirect_tqdm + + LOG = logging.getLogger(__name__) + + if __name__ == '__main__': + logging.basicConfig(level=logging.INFO) + with logging_redirect_tqdm(): + for i in trange(9): + if i == 4: + LOG.info("console logging redirected to `tqdm.write()`") + # logging restored + +Monitoring thread, intervals and miniters +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +``tqdm`` implements a few tricks to increase efficiency and reduce overhead. + +- Avoid unnecessary frequent bar refreshing: ``mininterval`` defines how long + to wait between each refresh. ``tqdm`` always gets updated in the background, + but it will display only every ``mininterval``. +- Reduce number of calls to check system clock/time. +- ``mininterval`` is more intuitive to configure than ``miniters``. + A clever adjustment system ``dynamic_miniters`` will automatically adjust + ``miniters`` to the amount of iterations that fit into time ``mininterval``. + Essentially, ``tqdm`` will check if it's time to print without actually + checking time. This behaviour can be still be bypassed by manually setting + ``miniters``. + +However, consider a case with a combination of fast and slow iterations. +After a few fast iterations, ``dynamic_miniters`` will set ``miniters`` to a +large number. When iteration rate subsequently slows, ``miniters`` will +remain large and thus reduce display update frequency. To address this: + +- ``maxinterval`` defines the maximum time between display refreshes. + A concurrent monitoring thread checks for overdue updates and forces one + where necessary. 
+ +The monitoring thread should not have a noticeable overhead, and guarantees +updates at least every 10 seconds by default. +This value can be directly changed by setting the ``monitor_interval`` of +any ``tqdm`` instance (i.e. ``t = tqdm.tqdm(...); t.monitor_interval = 2``). +The monitor thread may be disabled application-wide by setting +``tqdm.tqdm.monitor_interval = 0`` before instantiation of any ``tqdm`` bar. + + +Merch +----- + +You can buy `tqdm branded merch `__ now! + +Contributions +------------- + +|GitHub-Commits| |GitHub-Issues| |GitHub-PRs| |OpenHub-Status| |GitHub-Contributions| |CII Best Practices| + +All source code is hosted on `GitHub `__. +Contributions are welcome. + +See the +`CONTRIBUTING `__ +file for more information. + +Developers who have made significant contributions, ranked by *SLoC* +(surviving lines of code, +`git fame `__ ``-wMC --excl '\.(png|gif|jpg)$'``), +are: + +==================== ======================================================== ==== ================================ +Name ID SLoC Notes +==================== ======================================================== ==== ================================ +Casper da Costa-Luis `casperdcl `__ ~80% primary maintainer |Gift-Casper| +Stephen Larroque `lrq3000 `__ ~9% team member +Martin Zugnoni `martinzugnoni `__ ~3% +Daniel Ecer `de-code `__ ~2% +Richard Sheridan `richardsheridan `__ ~1% +Guangshuo Chen `chengs `__ ~1% +Helio Machado `0x2b3bfa0 `__ ~1% +Kyle Altendorf `altendky `__ <1% +Noam Yorav-Raphael `noamraph `__ <1% original author +Matthew Stevens `mjstevens777 `__ <1% +Hadrien Mary `hadim `__ <1% team member +Mikhail Korobov `kmike `__ <1% team member +==================== ======================================================== ==== ================================ + +Ports to Other Languages +~~~~~~~~~~~~~~~~~~~~~~~~ + +A list is available on +`this wiki page `__. 
+ + +LICENCE +------- + +Open Source (OSI approved): |LICENCE| + +Citation information: |DOI| + +|README-Hits| (Since 19 May 2016) + +.. |Logo| image:: https://tqdm.github.io/img/logo.gif +.. |Screenshot| image:: https://tqdm.github.io/img/tqdm.gif +.. |Video| image:: https://tqdm.github.io/img/video.jpg + :target: https://tqdm.github.io/video +.. |Slides| image:: https://tqdm.github.io/img/slides.jpg + :target: https://tqdm.github.io/PyData2019/slides.html +.. |Merch| image:: https://tqdm.github.io/img/merch.jpg + :target: https://tqdm.github.io/merch +.. |Build-Status| image:: https://img.shields.io/github/actions/workflow/status/tqdm/tqdm/test.yml?branch=master&label=tqdm&logo=GitHub + :target: https://github.com/tqdm/tqdm/actions/workflows/test.yml +.. |Coverage-Status| image:: https://img.shields.io/coveralls/github/tqdm/tqdm/master?logo=coveralls + :target: https://coveralls.io/github/tqdm/tqdm +.. |Branch-Coverage-Status| image:: https://codecov.io/gh/tqdm/tqdm/branch/master/graph/badge.svg + :target: https://codecov.io/gh/tqdm/tqdm +.. |Codacy-Grade| image:: https://app.codacy.com/project/badge/Grade/3f965571598f44549c7818f29cdcf177 + :target: https://www.codacy.com/gh/tqdm/tqdm/dashboard +.. |CII Best Practices| image:: https://bestpractices.coreinfrastructure.org/projects/3264/badge + :target: https://bestpractices.coreinfrastructure.org/projects/3264 +.. |GitHub-Status| image:: https://img.shields.io/github/tag/tqdm/tqdm.svg?maxAge=86400&logo=github&logoColor=white + :target: https://github.com/tqdm/tqdm/releases +.. |GitHub-Forks| image:: https://img.shields.io/github/forks/tqdm/tqdm.svg?logo=github&logoColor=white + :target: https://github.com/tqdm/tqdm/network +.. |GitHub-Stars| image:: https://img.shields.io/github/stars/tqdm/tqdm.svg?logo=github&logoColor=white + :target: https://github.com/tqdm/tqdm/stargazers +.. 
|GitHub-Commits| image:: https://img.shields.io/github/commit-activity/y/tqdm/tqdm.svg?logo=git&logoColor=white + :target: https://github.com/tqdm/tqdm/graphs/commit-activity +.. |GitHub-Issues| image:: https://img.shields.io/github/issues-closed/tqdm/tqdm.svg?logo=github&logoColor=white + :target: https://github.com/tqdm/tqdm/issues?q= +.. |GitHub-PRs| image:: https://img.shields.io/github/issues-pr-closed/tqdm/tqdm.svg?logo=github&logoColor=white + :target: https://github.com/tqdm/tqdm/pulls +.. |GitHub-Contributions| image:: https://img.shields.io/github/contributors/tqdm/tqdm.svg?logo=github&logoColor=white + :target: https://github.com/tqdm/tqdm/graphs/contributors +.. |GitHub-Updated| image:: https://img.shields.io/github/last-commit/tqdm/tqdm/master.svg?logo=github&logoColor=white&label=pushed + :target: https://github.com/tqdm/tqdm/pulse +.. |Gift-Casper| image:: https://img.shields.io/badge/dynamic/json.svg?color=ff69b4&label=gifts%20received&prefix=%C2%A3&query=%24..sum&url=https%3A%2F%2Fcaspersci.uk.to%2Fgifts.json + :target: https://cdcl.ml/sponsor +.. |Versions| image:: https://img.shields.io/pypi/v/tqdm.svg + :target: https://tqdm.github.io/releases +.. |PyPI-Downloads| image:: https://img.shields.io/pypi/dm/tqdm.svg?label=pypi%20downloads&logo=PyPI&logoColor=white + :target: https://pepy.tech/project/tqdm +.. |Py-Versions| image:: https://img.shields.io/pypi/pyversions/tqdm.svg?logo=python&logoColor=white + :target: https://pypi.org/project/tqdm +.. |Conda-Forge-Status| image:: https://img.shields.io/conda/v/conda-forge/tqdm.svg?label=conda-forge&logo=conda-forge + :target: https://anaconda.org/conda-forge/tqdm +.. |Snapcraft| image:: https://img.shields.io/badge/snap-install-82BEA0.svg?logo=snapcraft + :target: https://snapcraft.io/tqdm +.. |Docker| image:: https://img.shields.io/badge/docker-pull-blue.svg?logo=docker&logoColor=white + :target: https://hub.docker.com/r/tqdm/tqdm +.. 
|Libraries-Rank| image:: https://img.shields.io/librariesio/sourcerank/pypi/tqdm.svg?logo=koding&logoColor=white + :target: https://libraries.io/pypi/tqdm +.. |Libraries-Dependents| image:: https://img.shields.io/librariesio/dependent-repos/pypi/tqdm.svg?logo=koding&logoColor=white + :target: https://github.com/tqdm/tqdm/network/dependents +.. |OpenHub-Status| image:: https://www.openhub.net/p/tqdm/widgets/project_thin_badge?format=gif + :target: https://www.openhub.net/p/tqdm?ref=Thin+badge +.. |awesome-python| image:: https://awesome.re/mentioned-badge.svg + :target: https://github.com/vinta/awesome-python +.. |LICENCE| image:: https://img.shields.io/pypi/l/tqdm.svg + :target: https://raw.githubusercontent.com/tqdm/tqdm/master/LICENCE +.. |DOI| image:: https://img.shields.io/badge/DOI-10.5281/zenodo.595120-blue.svg + :target: https://doi.org/10.5281/zenodo.595120 +.. |binder-demo| image:: https://mybinder.org/badge_logo.svg + :target: https://mybinder.org/v2/gh/tqdm/tqdm/master?filepath=DEMO.ipynb +.. |Screenshot-Jupyter1| image:: https://tqdm.github.io/img/jupyter-1.gif +.. |Screenshot-Jupyter2| image:: https://tqdm.github.io/img/jupyter-2.gif +.. |Screenshot-Jupyter3| image:: https://tqdm.github.io/img/jupyter-3.gif +.. 
|README-Hits| image:: https://caspersci.uk.to/cgi-bin/hits.cgi?q=tqdm&style=social&r=https://github.com/tqdm/tqdm&l=https://tqdm.github.io/img/favicon.png&f=https://tqdm.github.io/img/logo.gif + :target: https://caspersci.uk.to/cgi-bin/hits.cgi?q=tqdm&a=plot&r=https://github.com/tqdm/tqdm&l=https://tqdm.github.io/img/favicon.png&f=https://tqdm.github.io/img/logo.gif&style=social diff --git a/llmeval-env/lib/python3.10/site-packages/tqdm-4.66.4.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/tqdm-4.66.4.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..bab98d675883cc7567a79df485cd7b4f015e376f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tqdm-4.66.4.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.43.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/llmeval-env/lib/python3.10/site-packages/tqdm-4.66.4.dist-info/entry_points.txt b/llmeval-env/lib/python3.10/site-packages/tqdm-4.66.4.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..540e60f4e073bc53a5f0a521a3639e0d80780af4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tqdm-4.66.4.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[console_scripts] +tqdm = tqdm.cli:main diff --git a/llmeval-env/lib/python3.10/site-packages/tqdm-4.66.4.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/tqdm-4.66.4.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..78620c472c9d799a14ccb02a0233f4669b3bcdcb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tqdm-4.66.4.dist-info/top_level.txt @@ -0,0 +1 @@ +tqdm diff --git a/llmeval-env/lib/python3.10/site-packages/typing_extensions.py b/llmeval-env/lib/python3.10/site-packages/typing_extensions.py new file mode 100644 index 0000000000000000000000000000000000000000..9ccd519ce4f16345bd77a0eae03deaae16bf4062 --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/typing_extensions.py @@ -0,0 +1,3332 @@ +import abc +import collections +import collections.abc +import functools +import inspect +import operator +import sys +import types as _types +import typing +import warnings + +__all__ = [ + # Super-special typing primitives. + 'Any', + 'ClassVar', + 'Concatenate', + 'Final', + 'LiteralString', + 'ParamSpec', + 'ParamSpecArgs', + 'ParamSpecKwargs', + 'Self', + 'Type', + 'TypeVar', + 'TypeVarTuple', + 'Unpack', + + # ABCs (from collections.abc). + 'Awaitable', + 'AsyncIterator', + 'AsyncIterable', + 'Coroutine', + 'AsyncGenerator', + 'AsyncContextManager', + 'Buffer', + 'ChainMap', + + # Concrete collection types. + 'ContextManager', + 'Counter', + 'Deque', + 'DefaultDict', + 'NamedTuple', + 'OrderedDict', + 'TypedDict', + + # Structural checks, a.k.a. protocols. + 'SupportsAbs', + 'SupportsBytes', + 'SupportsComplex', + 'SupportsFloat', + 'SupportsIndex', + 'SupportsInt', + 'SupportsRound', + + # One-off things. 
+ 'Annotated', + 'assert_never', + 'assert_type', + 'clear_overloads', + 'dataclass_transform', + 'deprecated', + 'Doc', + 'get_overloads', + 'final', + 'get_args', + 'get_origin', + 'get_original_bases', + 'get_protocol_members', + 'get_type_hints', + 'IntVar', + 'is_protocol', + 'is_typeddict', + 'Literal', + 'NewType', + 'overload', + 'override', + 'Protocol', + 'reveal_type', + 'runtime', + 'runtime_checkable', + 'Text', + 'TypeAlias', + 'TypeAliasType', + 'TypeGuard', + 'TypeIs', + 'TYPE_CHECKING', + 'Never', + 'NoReturn', + 'ReadOnly', + 'Required', + 'NotRequired', + + # Pure aliases, have always been in typing + 'AbstractSet', + 'AnyStr', + 'BinaryIO', + 'Callable', + 'Collection', + 'Container', + 'Dict', + 'ForwardRef', + 'FrozenSet', + 'Generator', + 'Generic', + 'Hashable', + 'IO', + 'ItemsView', + 'Iterable', + 'Iterator', + 'KeysView', + 'List', + 'Mapping', + 'MappingView', + 'Match', + 'MutableMapping', + 'MutableSequence', + 'MutableSet', + 'Optional', + 'Pattern', + 'Reversible', + 'Sequence', + 'Set', + 'Sized', + 'TextIO', + 'Tuple', + 'Union', + 'ValuesView', + 'cast', + 'no_type_check', + 'no_type_check_decorator', +] + +# for backward compatibility +PEP_560 = True +GenericMeta = type + +# The functions below are modified copies of typing internal helpers. +# They are needed by _ProtocolMeta and they provide support for PEP 646. + + +class _Sentinel: + def __repr__(self): + return "" + + +_marker = _Sentinel() + + +if sys.version_info >= (3, 10): + def _should_collect_from_parameters(t): + return isinstance( + t, (typing._GenericAlias, _types.GenericAlias, _types.UnionType) + ) +elif sys.version_info >= (3, 9): + def _should_collect_from_parameters(t): + return isinstance(t, (typing._GenericAlias, _types.GenericAlias)) +else: + def _should_collect_from_parameters(t): + return isinstance(t, typing._GenericAlias) and not t._special + + +NoReturn = typing.NoReturn + +# Some unconstrained type variables. These are used by the container types. 
+# (These are not for export.) +T = typing.TypeVar('T') # Any type. +KT = typing.TypeVar('KT') # Key type. +VT = typing.TypeVar('VT') # Value type. +T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers. +T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant. + + +if sys.version_info >= (3, 11): + from typing import Any +else: + + class _AnyMeta(type): + def __instancecheck__(self, obj): + if self is Any: + raise TypeError("typing_extensions.Any cannot be used with isinstance()") + return super().__instancecheck__(obj) + + def __repr__(self): + if self is Any: + return "typing_extensions.Any" + return super().__repr__() + + class Any(metaclass=_AnyMeta): + """Special type indicating an unconstrained type. + - Any is compatible with every type. + - Any assumed to have all methods. + - All values assumed to be instances of Any. + Note that all the above statements are true from the point of view of + static type checkers. At runtime, Any should not be used with instance + checks. + """ + def __new__(cls, *args, **kwargs): + if cls is Any: + raise TypeError("Any cannot be instantiated") + return super().__new__(cls, *args, **kwargs) + + +ClassVar = typing.ClassVar + + +class _ExtensionsSpecialForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' + self._name + + +Final = typing.Final + +if sys.version_info >= (3, 11): + final = typing.final +else: + # @final exists in 3.8+, but we backport it for all versions + # before 3.11 to keep support for the __final__ attribute. + # See https://bugs.python.org/issue46342 + def final(f): + """This decorator can be used to indicate to type checkers that + the decorated method cannot be overridden, and decorated class + cannot be subclassed. For example: + + class Base: + @final + def done(self) -> None: + ... + class Sub(Base): + def done(self) -> None: # Error reported by type checker + ... + @final + class Leaf: + ... 
+ class Other(Leaf): # Error reported by type checker + ... + + There is no runtime checking of these properties. The decorator + sets the ``__final__`` attribute to ``True`` on the decorated object + to allow runtime introspection. + """ + try: + f.__final__ = True + except (AttributeError, TypeError): + # Skip the attribute silently if it is not writable. + # AttributeError happens if the object has __slots__ or a + # read-only property, TypeError if it's a builtin class. + pass + return f + + +def IntVar(name): + return typing.TypeVar(name) + + +# A Literal bug was fixed in 3.11.0, 3.10.1 and 3.9.8 +if sys.version_info >= (3, 10, 1): + Literal = typing.Literal +else: + def _flatten_literal_params(parameters): + """An internal helper for Literal creation: flatten Literals among parameters""" + params = [] + for p in parameters: + if isinstance(p, _LiteralGenericAlias): + params.extend(p.__args__) + else: + params.append(p) + return tuple(params) + + def _value_and_type_iter(params): + for p in params: + yield p, type(p) + + class _LiteralGenericAlias(typing._GenericAlias, _root=True): + def __eq__(self, other): + if not isinstance(other, _LiteralGenericAlias): + return NotImplemented + these_args_deduped = set(_value_and_type_iter(self.__args__)) + other_args_deduped = set(_value_and_type_iter(other.__args__)) + return these_args_deduped == other_args_deduped + + def __hash__(self): + return hash(frozenset(_value_and_type_iter(self.__args__))) + + class _LiteralForm(_ExtensionsSpecialForm, _root=True): + def __init__(self, doc: str): + self._name = 'Literal' + self._doc = self.__doc__ = doc + + def __getitem__(self, parameters): + if not isinstance(parameters, tuple): + parameters = (parameters,) + + parameters = _flatten_literal_params(parameters) + + val_type_pairs = list(_value_and_type_iter(parameters)) + try: + deduped_pairs = set(val_type_pairs) + except TypeError: + # unhashable parameters + pass + else: + # similar logic to typing._deduplicate on Python 
3.9+ + if len(deduped_pairs) < len(val_type_pairs): + new_parameters = [] + for pair in val_type_pairs: + if pair in deduped_pairs: + new_parameters.append(pair[0]) + deduped_pairs.remove(pair) + assert not deduped_pairs, deduped_pairs + parameters = tuple(new_parameters) + + return _LiteralGenericAlias(self, parameters) + + Literal = _LiteralForm(doc="""\ + A type that can be used to indicate to type checkers + that the corresponding value has a value literally equivalent + to the provided parameter. For example: + + var: Literal[4] = 4 + + The type checker understands that 'var' is literally equal to + the value 4 and no other value. + + Literal[...] cannot be subclassed. There is no runtime + checking verifying that the parameter is actually a value + instead of a type.""") + + +_overload_dummy = typing._overload_dummy + + +if hasattr(typing, "get_overloads"): # 3.11+ + overload = typing.overload + get_overloads = typing.get_overloads + clear_overloads = typing.clear_overloads +else: + # {module: {qualname: {firstlineno: func}}} + _overload_registry = collections.defaultdict( + functools.partial(collections.defaultdict, dict) + ) + + def overload(func): + """Decorator for overloaded functions/methods. + + In a stub file, place two or more stub definitions for the same + function in a row, each decorated with @overload. For example: + + @overload + def utf8(value: None) -> None: ... + @overload + def utf8(value: bytes) -> bytes: ... + @overload + def utf8(value: str) -> bytes: ... + + In a non-stub file (i.e. a regular .py file), do the same but + follow it with an implementation. The implementation should *not* + be decorated with @overload. For example: + + @overload + def utf8(value: None) -> None: ... + @overload + def utf8(value: bytes) -> bytes: ... + @overload + def utf8(value: str) -> bytes: ... + def utf8(value): + # implementation goes here + + The overloads for a function can be retrieved at runtime using the + get_overloads() function. 
+ """ + # classmethod and staticmethod + f = getattr(func, "__func__", func) + try: + _overload_registry[f.__module__][f.__qualname__][ + f.__code__.co_firstlineno + ] = func + except AttributeError: + # Not a normal function; ignore. + pass + return _overload_dummy + + def get_overloads(func): + """Return all defined overloads for *func* as a sequence.""" + # classmethod and staticmethod + f = getattr(func, "__func__", func) + if f.__module__ not in _overload_registry: + return [] + mod_dict = _overload_registry[f.__module__] + if f.__qualname__ not in mod_dict: + return [] + return list(mod_dict[f.__qualname__].values()) + + def clear_overloads(): + """Clear all overloads in the registry.""" + _overload_registry.clear() + + +# This is not a real generic class. Don't use outside annotations. +Type = typing.Type + +# Various ABCs mimicking those in collections.abc. +# A few are simply re-exported for completeness. +Awaitable = typing.Awaitable +Coroutine = typing.Coroutine +AsyncIterable = typing.AsyncIterable +AsyncIterator = typing.AsyncIterator +Deque = typing.Deque +ContextManager = typing.ContextManager +AsyncContextManager = typing.AsyncContextManager +DefaultDict = typing.DefaultDict +OrderedDict = typing.OrderedDict +Counter = typing.Counter +ChainMap = typing.ChainMap +AsyncGenerator = typing.AsyncGenerator +Text = typing.Text +TYPE_CHECKING = typing.TYPE_CHECKING + + +_PROTO_ALLOWLIST = { + 'collections.abc': [ + 'Callable', 'Awaitable', 'Iterable', 'Iterator', 'AsyncIterable', + 'Hashable', 'Sized', 'Container', 'Collection', 'Reversible', 'Buffer', + ], + 'contextlib': ['AbstractContextManager', 'AbstractAsyncContextManager'], + 'typing_extensions': ['Buffer'], +} + + +_EXCLUDED_ATTRS = { + "__abstractmethods__", "__annotations__", "__weakref__", "_is_protocol", + "_is_runtime_protocol", "__dict__", "__slots__", "__parameters__", + "__orig_bases__", "__module__", "_MutableMapping__marker", "__doc__", + "__subclasshook__", "__orig_class__", "__init__", 
"__new__", + "__protocol_attrs__", "__non_callable_proto_members__", + "__match_args__", +} + +if sys.version_info >= (3, 9): + _EXCLUDED_ATTRS.add("__class_getitem__") + +if sys.version_info >= (3, 12): + _EXCLUDED_ATTRS.add("__type_params__") + +_EXCLUDED_ATTRS = frozenset(_EXCLUDED_ATTRS) + + +def _get_protocol_attrs(cls): + attrs = set() + for base in cls.__mro__[:-1]: # without object + if base.__name__ in {'Protocol', 'Generic'}: + continue + annotations = getattr(base, '__annotations__', {}) + for attr in (*base.__dict__, *annotations): + if (not attr.startswith('_abc_') and attr not in _EXCLUDED_ATTRS): + attrs.add(attr) + return attrs + + +def _caller(depth=2): + try: + return sys._getframe(depth).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): # For platforms without _getframe() + return None + + +# `__match_args__` attribute was removed from protocol members in 3.13, +# we want to backport this change to older Python versions. +if sys.version_info >= (3, 13): + Protocol = typing.Protocol +else: + def _allow_reckless_class_checks(depth=3): + """Allow instance and class checks for special stdlib modules. + The abc and functools modules indiscriminately call isinstance() and + issubclass() on the whole MRO of a user class, which may contain protocols. + """ + return _caller(depth) in {'abc', 'functools', None} + + def _no_init(self, *args, **kwargs): + if type(self)._is_protocol: + raise TypeError('Protocols cannot be instantiated') + + def _type_check_issubclass_arg_1(arg): + """Raise TypeError if `arg` is not an instance of `type` + in `issubclass(arg, )`. + + In most cases, this is verified by type.__subclasscheck__. + Checking it again unnecessarily would slow down issubclass() checks, + so, we don't perform this check unless we absolutely have to. + + For various error paths, however, + we want to ensure that *this* error message is shown to the user + where relevant, rather than a typing.py-specific error message. 
+ """ + if not isinstance(arg, type): + # Same error message as for issubclass(1, int). + raise TypeError('issubclass() arg 1 must be a class') + + # Inheriting from typing._ProtocolMeta isn't actually desirable, + # but is necessary to allow typing.Protocol and typing_extensions.Protocol + # to mix without getting TypeErrors about "metaclass conflict" + class _ProtocolMeta(type(typing.Protocol)): + # This metaclass is somewhat unfortunate, + # but is necessary for several reasons... + # + # NOTE: DO NOT call super() in any methods in this class + # That would call the methods on typing._ProtocolMeta on Python 3.8-3.11 + # and those are slow + def __new__(mcls, name, bases, namespace, **kwargs): + if name == "Protocol" and len(bases) < 2: + pass + elif {Protocol, typing.Protocol} & set(bases): + for base in bases: + if not ( + base in {object, typing.Generic, Protocol, typing.Protocol} + or base.__name__ in _PROTO_ALLOWLIST.get(base.__module__, []) + or is_protocol(base) + ): + raise TypeError( + f"Protocols can only inherit from other protocols, " + f"got {base!r}" + ) + return abc.ABCMeta.__new__(mcls, name, bases, namespace, **kwargs) + + def __init__(cls, *args, **kwargs): + abc.ABCMeta.__init__(cls, *args, **kwargs) + if getattr(cls, "_is_protocol", False): + cls.__protocol_attrs__ = _get_protocol_attrs(cls) + + def __subclasscheck__(cls, other): + if cls is Protocol: + return type.__subclasscheck__(cls, other) + if ( + getattr(cls, '_is_protocol', False) + and not _allow_reckless_class_checks() + ): + if not getattr(cls, '_is_runtime_protocol', False): + _type_check_issubclass_arg_1(other) + raise TypeError( + "Instance and class checks can only be used with " + "@runtime_checkable protocols" + ) + if ( + # this attribute is set by @runtime_checkable: + cls.__non_callable_proto_members__ + and cls.__dict__.get("__subclasshook__") is _proto_hook + ): + _type_check_issubclass_arg_1(other) + non_method_attrs = sorted(cls.__non_callable_proto_members__) + raise 
TypeError( + "Protocols with non-method members don't support issubclass()." + f" Non-method members: {str(non_method_attrs)[1:-1]}." + ) + return abc.ABCMeta.__subclasscheck__(cls, other) + + def __instancecheck__(cls, instance): + # We need this method for situations where attributes are + # assigned in __init__. + if cls is Protocol: + return type.__instancecheck__(cls, instance) + if not getattr(cls, "_is_protocol", False): + # i.e., it's a concrete subclass of a protocol + return abc.ABCMeta.__instancecheck__(cls, instance) + + if ( + not getattr(cls, '_is_runtime_protocol', False) and + not _allow_reckless_class_checks() + ): + raise TypeError("Instance and class checks can only be used with" + " @runtime_checkable protocols") + + if abc.ABCMeta.__instancecheck__(cls, instance): + return True + + for attr in cls.__protocol_attrs__: + try: + val = inspect.getattr_static(instance, attr) + except AttributeError: + break + # this attribute is set by @runtime_checkable: + if val is None and attr not in cls.__non_callable_proto_members__: + break + else: + return True + + return False + + def __eq__(cls, other): + # Hack so that typing.Generic.__class_getitem__ + # treats typing_extensions.Protocol + # as equivalent to typing.Protocol + if abc.ABCMeta.__eq__(cls, other) is True: + return True + return cls is Protocol and other is typing.Protocol + + # This has to be defined, or the abc-module cache + # complains about classes with this metaclass being unhashable, + # if we define only __eq__! + def __hash__(cls) -> int: + return type.__hash__(cls) + + @classmethod + def _proto_hook(cls, other): + if not cls.__dict__.get('_is_protocol', False): + return NotImplemented + + for attr in cls.__protocol_attrs__: + for base in other.__mro__: + # Check if the members appears in the class dictionary... + if attr in base.__dict__: + if base.__dict__[attr] is None: + return NotImplemented + break + + # ...or in annotations, if it is a sub-protocol. 
+ annotations = getattr(base, '__annotations__', {}) + if ( + isinstance(annotations, collections.abc.Mapping) + and attr in annotations + and is_protocol(other) + ): + break + else: + return NotImplemented + return True + + class Protocol(typing.Generic, metaclass=_ProtocolMeta): + __doc__ = typing.Protocol.__doc__ + __slots__ = () + _is_protocol = True + _is_runtime_protocol = False + + def __init_subclass__(cls, *args, **kwargs): + super().__init_subclass__(*args, **kwargs) + + # Determine if this is a protocol or a concrete subclass. + if not cls.__dict__.get('_is_protocol', False): + cls._is_protocol = any(b is Protocol for b in cls.__bases__) + + # Set (or override) the protocol subclass hook. + if '__subclasshook__' not in cls.__dict__: + cls.__subclasshook__ = _proto_hook + + # Prohibit instantiation for protocol classes + if cls._is_protocol and cls.__init__ is Protocol.__init__: + cls.__init__ = _no_init + + +if sys.version_info >= (3, 13): + runtime_checkable = typing.runtime_checkable +else: + def runtime_checkable(cls): + """Mark a protocol class as a runtime protocol. + + Such protocol can be used with isinstance() and issubclass(). + Raise TypeError if applied to a non-protocol class. + This allows a simple-minded structural check very similar to + one trick ponies in collections.abc such as Iterable. + + For example:: + + @runtime_checkable + class Closable(Protocol): + def close(self): ... + + assert isinstance(open('/some/file'), Closable) + + Warning: this will check only the presence of the required methods, + not their type signatures! + """ + if not issubclass(cls, typing.Generic) or not getattr(cls, '_is_protocol', False): + raise TypeError('@runtime_checkable can be only applied to protocol classes,' + ' got %r' % cls) + cls._is_runtime_protocol = True + + # Only execute the following block if it's a typing_extensions.Protocol class. + # typing.Protocol classes don't need it. 
+ if isinstance(cls, _ProtocolMeta): + # PEP 544 prohibits using issubclass() + # with protocols that have non-method members. + # See gh-113320 for why we compute this attribute here, + # rather than in `_ProtocolMeta.__init__` + cls.__non_callable_proto_members__ = set() + for attr in cls.__protocol_attrs__: + try: + is_callable = callable(getattr(cls, attr, None)) + except Exception as e: + raise TypeError( + f"Failed to determine whether protocol member {attr!r} " + "is a method member" + ) from e + else: + if not is_callable: + cls.__non_callable_proto_members__.add(attr) + + return cls + + +# The "runtime" alias exists for backwards compatibility. +runtime = runtime_checkable + + +# Our version of runtime-checkable protocols is faster on Python 3.8-3.11 +if sys.version_info >= (3, 12): + SupportsInt = typing.SupportsInt + SupportsFloat = typing.SupportsFloat + SupportsComplex = typing.SupportsComplex + SupportsBytes = typing.SupportsBytes + SupportsIndex = typing.SupportsIndex + SupportsAbs = typing.SupportsAbs + SupportsRound = typing.SupportsRound +else: + @runtime_checkable + class SupportsInt(Protocol): + """An ABC with one abstract method __int__.""" + __slots__ = () + + @abc.abstractmethod + def __int__(self) -> int: + pass + + @runtime_checkable + class SupportsFloat(Protocol): + """An ABC with one abstract method __float__.""" + __slots__ = () + + @abc.abstractmethod + def __float__(self) -> float: + pass + + @runtime_checkable + class SupportsComplex(Protocol): + """An ABC with one abstract method __complex__.""" + __slots__ = () + + @abc.abstractmethod + def __complex__(self) -> complex: + pass + + @runtime_checkable + class SupportsBytes(Protocol): + """An ABC with one abstract method __bytes__.""" + __slots__ = () + + @abc.abstractmethod + def __bytes__(self) -> bytes: + pass + + @runtime_checkable + class SupportsIndex(Protocol): + __slots__ = () + + @abc.abstractmethod + def __index__(self) -> int: + pass + + @runtime_checkable + class 
SupportsAbs(Protocol[T_co]): + """ + An ABC with one abstract method __abs__ that is covariant in its return type. + """ + __slots__ = () + + @abc.abstractmethod + def __abs__(self) -> T_co: + pass + + @runtime_checkable + class SupportsRound(Protocol[T_co]): + """ + An ABC with one abstract method __round__ that is covariant in its return type. + """ + __slots__ = () + + @abc.abstractmethod + def __round__(self, ndigits: int = 0) -> T_co: + pass + + +def _ensure_subclassable(mro_entries): + def inner(func): + if sys.implementation.name == "pypy" and sys.version_info < (3, 9): + cls_dict = { + "__call__": staticmethod(func), + "__mro_entries__": staticmethod(mro_entries) + } + t = type(func.__name__, (), cls_dict) + return functools.update_wrapper(t(), func) + else: + func.__mro_entries__ = mro_entries + return func + return inner + + +# Update this to something like >=3.13.0b1 if and when +# PEP 728 is implemented in CPython +_PEP_728_IMPLEMENTED = False + +if _PEP_728_IMPLEMENTED: + # The standard library TypedDict in Python 3.8 does not store runtime information + # about which (if any) keys are optional. See https://bugs.python.org/issue38834 + # The standard library TypedDict in Python 3.9.0/1 does not honour the "total" + # keyword with old-style TypedDict(). See https://bugs.python.org/issue42059 + # The standard library TypedDict below Python 3.11 does not store runtime + # information about optional and required keys when using Required or NotRequired. + # Generic TypedDicts are also impossible using typing.TypedDict on Python <3.11. + # Aaaand on 3.12 we add __orig_bases__ to TypedDict + # to enable better runtime introspection. + # On 3.13 we deprecate some odd ways of creating TypedDicts. + # Also on 3.13, PEP 705 adds the ReadOnly[] qualifier. + # PEP 728 (still pending) makes more changes. 
+ TypedDict = typing.TypedDict + _TypedDictMeta = typing._TypedDictMeta + is_typeddict = typing.is_typeddict +else: + # 3.10.0 and later + _TAKES_MODULE = "module" in inspect.signature(typing._type_check).parameters + + def _get_typeddict_qualifiers(annotation_type): + while True: + annotation_origin = get_origin(annotation_type) + if annotation_origin is Annotated: + annotation_args = get_args(annotation_type) + if annotation_args: + annotation_type = annotation_args[0] + else: + break + elif annotation_origin is Required: + yield Required + annotation_type, = get_args(annotation_type) + elif annotation_origin is NotRequired: + yield NotRequired + annotation_type, = get_args(annotation_type) + elif annotation_origin is ReadOnly: + yield ReadOnly + annotation_type, = get_args(annotation_type) + else: + break + + class _TypedDictMeta(type): + def __new__(cls, name, bases, ns, *, total=True, closed=False): + """Create new typed dict class object. + + This method is called when TypedDict is subclassed, + or when TypedDict is instantiated. This way + TypedDict supports all three syntax forms described in its docstring. + Subclasses and instances of TypedDict return actual dictionaries. 
+ """ + for base in bases: + if type(base) is not _TypedDictMeta and base is not typing.Generic: + raise TypeError('cannot inherit from both a TypedDict type ' + 'and a non-TypedDict base class') + + if any(issubclass(b, typing.Generic) for b in bases): + generic_base = (typing.Generic,) + else: + generic_base = () + + # typing.py generally doesn't let you inherit from plain Generic, unless + # the name of the class happens to be "Protocol" + tp_dict = type.__new__(_TypedDictMeta, "Protocol", (*generic_base, dict), ns) + tp_dict.__name__ = name + if tp_dict.__qualname__ == "Protocol": + tp_dict.__qualname__ = name + + if not hasattr(tp_dict, '__orig_bases__'): + tp_dict.__orig_bases__ = bases + + annotations = {} + own_annotations = ns.get('__annotations__', {}) + msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type" + if _TAKES_MODULE: + own_annotations = { + n: typing._type_check(tp, msg, module=tp_dict.__module__) + for n, tp in own_annotations.items() + } + else: + own_annotations = { + n: typing._type_check(tp, msg) + for n, tp in own_annotations.items() + } + required_keys = set() + optional_keys = set() + readonly_keys = set() + mutable_keys = set() + extra_items_type = None + + for base in bases: + base_dict = base.__dict__ + + annotations.update(base_dict.get('__annotations__', {})) + required_keys.update(base_dict.get('__required_keys__', ())) + optional_keys.update(base_dict.get('__optional_keys__', ())) + readonly_keys.update(base_dict.get('__readonly_keys__', ())) + mutable_keys.update(base_dict.get('__mutable_keys__', ())) + base_extra_items_type = base_dict.get('__extra_items__', None) + if base_extra_items_type is not None: + extra_items_type = base_extra_items_type + + if closed and extra_items_type is None: + extra_items_type = Never + if closed and "__extra_items__" in own_annotations: + annotation_type = own_annotations.pop("__extra_items__") + qualifiers = set(_get_typeddict_qualifiers(annotation_type)) + if Required in 
qualifiers: + raise TypeError( + "Special key __extra_items__ does not support " + "Required" + ) + if NotRequired in qualifiers: + raise TypeError( + "Special key __extra_items__ does not support " + "NotRequired" + ) + extra_items_type = annotation_type + + annotations.update(own_annotations) + for annotation_key, annotation_type in own_annotations.items(): + qualifiers = set(_get_typeddict_qualifiers(annotation_type)) + + if Required in qualifiers: + required_keys.add(annotation_key) + elif NotRequired in qualifiers: + optional_keys.add(annotation_key) + elif total: + required_keys.add(annotation_key) + else: + optional_keys.add(annotation_key) + if ReadOnly in qualifiers: + mutable_keys.discard(annotation_key) + readonly_keys.add(annotation_key) + else: + mutable_keys.add(annotation_key) + readonly_keys.discard(annotation_key) + + tp_dict.__annotations__ = annotations + tp_dict.__required_keys__ = frozenset(required_keys) + tp_dict.__optional_keys__ = frozenset(optional_keys) + tp_dict.__readonly_keys__ = frozenset(readonly_keys) + tp_dict.__mutable_keys__ = frozenset(mutable_keys) + if not hasattr(tp_dict, '__total__'): + tp_dict.__total__ = total + tp_dict.__closed__ = closed + tp_dict.__extra_items__ = extra_items_type + return tp_dict + + __call__ = dict # static method + + def __subclasscheck__(cls, other): + # Typed dicts are only for static structural subtyping. + raise TypeError('TypedDict does not support instance and class checks') + + __instancecheck__ = __subclasscheck__ + + _TypedDict = type.__new__(_TypedDictMeta, 'TypedDict', (), {}) + + @_ensure_subclassable(lambda bases: (_TypedDict,)) + def TypedDict(typename, fields=_marker, /, *, total=True, closed=False, **kwargs): + """A simple typed namespace. At runtime it is equivalent to a plain dict. + + TypedDict creates a dictionary type such that a type checker will expect all + instances to have a certain set of keys, where each key is + associated with a value of a consistent type. 
This expectation + is not checked at runtime. + + Usage:: + + class Point2D(TypedDict): + x: int + y: int + label: str + + a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK + b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check + + assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first') + + The type info can be accessed via the Point2D.__annotations__ dict, and + the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets. + TypedDict supports an additional equivalent form:: + + Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str}) + + By default, all keys must be present in a TypedDict. It is possible + to override this by specifying totality:: + + class Point2D(TypedDict, total=False): + x: int + y: int + + This means that a Point2D TypedDict can have any of the keys omitted. A type + checker is only expected to support a literal False or True as the value of + the total argument. True is the default, and makes all items defined in the + class body be required. + + The Required and NotRequired special forms can also be used to mark + individual keys as being required or not required:: + + class Point2D(TypedDict): + x: int # the "x" key must always be present (Required is the default) + y: NotRequired[int] # the "y" key can be omitted + + See PEP 655 for more details on Required and NotRequired. + """ + if fields is _marker or fields is None: + if fields is _marker: + deprecated_thing = "Failing to pass a value for the 'fields' parameter" + else: + deprecated_thing = "Passing `None` as the 'fields' parameter" + + example = f"`{typename} = TypedDict({typename!r}, {{}})`" + deprecation_msg = ( + f"{deprecated_thing} is deprecated and will be disallowed in " + "Python 3.15. To create a TypedDict class with 0 fields " + "using the functional syntax, pass an empty dictionary, e.g. " + ) + example + "." 
+ warnings.warn(deprecation_msg, DeprecationWarning, stacklevel=2) + if closed is not False and closed is not True: + kwargs["closed"] = closed + closed = False + fields = kwargs + elif kwargs: + raise TypeError("TypedDict takes either a dict or keyword arguments," + " but not both") + if kwargs: + if sys.version_info >= (3, 13): + raise TypeError("TypedDict takes no keyword arguments") + warnings.warn( + "The kwargs-based syntax for TypedDict definitions is deprecated " + "in Python 3.11, will be removed in Python 3.13, and may not be " + "understood by third-party type checkers.", + DeprecationWarning, + stacklevel=2, + ) + + ns = {'__annotations__': dict(fields)} + module = _caller() + if module is not None: + # Setting correct module is necessary to make typed dict classes pickleable. + ns['__module__'] = module + + td = _TypedDictMeta(typename, (), ns, total=total, closed=closed) + td.__orig_bases__ = (TypedDict,) + return td + + if hasattr(typing, "_TypedDictMeta"): + _TYPEDDICT_TYPES = (typing._TypedDictMeta, _TypedDictMeta) + else: + _TYPEDDICT_TYPES = (_TypedDictMeta,) + + def is_typeddict(tp): + """Check if an annotation is a TypedDict class + + For example:: + class Film(TypedDict): + title: str + year: int + + is_typeddict(Film) # => True + is_typeddict(Union[list, str]) # => False + """ + # On 3.8, this would otherwise return True + if hasattr(typing, "TypedDict") and tp is typing.TypedDict: + return False + return isinstance(tp, _TYPEDDICT_TYPES) + + +if hasattr(typing, "assert_type"): + assert_type = typing.assert_type + +else: + def assert_type(val, typ, /): + """Assert (to the type checker) that the value is of the given type. + + When the type checker encounters a call to assert_type(), it + emits an error if the value is not of the specified type:: + + def greet(name: str) -> None: + assert_type(name, str) # ok + assert_type(name, int) # type checker error + + At runtime this returns the first argument unchanged and otherwise + does nothing. 
+ """ + return val + + +if hasattr(typing, "ReadOnly"): # 3.13+ + get_type_hints = typing.get_type_hints +else: # <=3.13 + # replaces _strip_annotations() + def _strip_extras(t): + """Strips Annotated, Required and NotRequired from a given type.""" + if isinstance(t, _AnnotatedAlias): + return _strip_extras(t.__origin__) + if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired, ReadOnly): + return _strip_extras(t.__args__[0]) + if isinstance(t, typing._GenericAlias): + stripped_args = tuple(_strip_extras(a) for a in t.__args__) + if stripped_args == t.__args__: + return t + return t.copy_with(stripped_args) + if hasattr(_types, "GenericAlias") and isinstance(t, _types.GenericAlias): + stripped_args = tuple(_strip_extras(a) for a in t.__args__) + if stripped_args == t.__args__: + return t + return _types.GenericAlias(t.__origin__, stripped_args) + if hasattr(_types, "UnionType") and isinstance(t, _types.UnionType): + stripped_args = tuple(_strip_extras(a) for a in t.__args__) + if stripped_args == t.__args__: + return t + return functools.reduce(operator.or_, stripped_args) + + return t + + def get_type_hints(obj, globalns=None, localns=None, include_extras=False): + """Return type hints for an object. + + This is often the same as obj.__annotations__, but it handles + forward references encoded as string literals, adds Optional[t] if a + default value equal to None is set and recursively replaces all + 'Annotated[T, ...]', 'Required[T]' or 'NotRequired[T]' with 'T' + (unless 'include_extras=True'). + + The argument may be a module, class, method, or function. The annotations + are returned as a dictionary. For classes, annotations include also + inherited members. + + TypeError is raised if the argument is not of a type that can contain + annotations, and an empty dictionary is returned if no annotations are + present. + + BEWARE -- the behavior of globalns and localns is counterintuitive + (unless you are familiar with how eval() and exec() work). 
The + search order is locals first, then globals. + + - If no dict arguments are passed, an attempt is made to use the + globals from obj (or the respective module's globals for classes), + and these are also used as the locals. If the object does not appear + to have globals, an empty dictionary is used. + + - If one dict argument is passed, it is used for both globals and + locals. + + - If two dict arguments are passed, they specify globals and + locals, respectively. + """ + if hasattr(typing, "Annotated"): # 3.9+ + hint = typing.get_type_hints( + obj, globalns=globalns, localns=localns, include_extras=True + ) + else: # 3.8 + hint = typing.get_type_hints(obj, globalns=globalns, localns=localns) + if include_extras: + return hint + return {k: _strip_extras(t) for k, t in hint.items()} + + +# Python 3.9+ has PEP 593 (Annotated) +if hasattr(typing, 'Annotated'): + Annotated = typing.Annotated + # Not exported and not a public API, but needed for get_origin() and get_args() + # to work. + _AnnotatedAlias = typing._AnnotatedAlias +# 3.8 +else: + class _AnnotatedAlias(typing._GenericAlias, _root=True): + """Runtime representation of an annotated type. + + At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't' + with extra annotations. The alias behaves like a normal typing alias, + instantiating is the same as instantiating the underlying type, binding + it to types is also the same. 
+ """ + def __init__(self, origin, metadata): + if isinstance(origin, _AnnotatedAlias): + metadata = origin.__metadata__ + metadata + origin = origin.__origin__ + super().__init__(origin, origin) + self.__metadata__ = metadata + + def copy_with(self, params): + assert len(params) == 1 + new_type = params[0] + return _AnnotatedAlias(new_type, self.__metadata__) + + def __repr__(self): + return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, " + f"{', '.join(repr(a) for a in self.__metadata__)}]") + + def __reduce__(self): + return operator.getitem, ( + Annotated, (self.__origin__,) + self.__metadata__ + ) + + def __eq__(self, other): + if not isinstance(other, _AnnotatedAlias): + return NotImplemented + if self.__origin__ != other.__origin__: + return False + return self.__metadata__ == other.__metadata__ + + def __hash__(self): + return hash((self.__origin__, self.__metadata__)) + + class Annotated: + """Add context specific metadata to a type. + + Example: Annotated[int, runtime_check.Unsigned] indicates to the + hypothetical runtime_check module that this type is an unsigned int. + Every other consumer of this type can ignore this metadata and treat + this type as int. + + The first argument to Annotated must be a valid type (and will be in + the __origin__ field), the remaining arguments are kept as a tuple in + the __extra__ field. + + Details: + + - It's an error to call `Annotated` with less than two arguments. 
+ - Nested Annotated are flattened:: + + Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3] + + - Instantiating an annotated type is equivalent to instantiating the + underlying type:: + + Annotated[C, Ann1](5) == C(5) + + - Annotated can be used as a generic type alias:: + + Optimized = Annotated[T, runtime.Optimize()] + Optimized[int] == Annotated[int, runtime.Optimize()] + + OptimizedList = Annotated[List[T], runtime.Optimize()] + OptimizedList[int] == Annotated[List[int], runtime.Optimize()] + """ + + __slots__ = () + + def __new__(cls, *args, **kwargs): + raise TypeError("Type Annotated cannot be instantiated.") + + @typing._tp_cache + def __class_getitem__(cls, params): + if not isinstance(params, tuple) or len(params) < 2: + raise TypeError("Annotated[...] should be used " + "with at least two arguments (a type and an " + "annotation).") + allowed_special_forms = (ClassVar, Final) + if get_origin(params[0]) in allowed_special_forms: + origin = params[0] + else: + msg = "Annotated[t, ...]: t must be a type." + origin = typing._type_check(params[0], msg) + metadata = tuple(params[1:]) + return _AnnotatedAlias(origin, metadata) + + def __init_subclass__(cls, *args, **kwargs): + raise TypeError( + f"Cannot subclass {cls.__module__}.Annotated" + ) + +# Python 3.8 has get_origin() and get_args() but those implementations aren't +# Annotated-aware, so we can't use those. Python 3.9's versions don't support +# ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do. +if sys.version_info[:2] >= (3, 10): + get_origin = typing.get_origin + get_args = typing.get_args +# 3.8-3.9 +else: + try: + # 3.9+ + from typing import _BaseGenericAlias + except ImportError: + _BaseGenericAlias = typing._GenericAlias + try: + # 3.9+ + from typing import GenericAlias as _typing_GenericAlias + except ImportError: + _typing_GenericAlias = typing._GenericAlias + + def get_origin(tp): + """Get the unsubscripted version of a type. 
+ + This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar + and Annotated. Return None for unsupported types. Examples:: + + get_origin(Literal[42]) is Literal + get_origin(int) is None + get_origin(ClassVar[int]) is ClassVar + get_origin(Generic) is Generic + get_origin(Generic[T]) is Generic + get_origin(Union[T, int]) is Union + get_origin(List[Tuple[T, T]][int]) == list + get_origin(P.args) is P + """ + if isinstance(tp, _AnnotatedAlias): + return Annotated + if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias, _BaseGenericAlias, + ParamSpecArgs, ParamSpecKwargs)): + return tp.__origin__ + if tp is typing.Generic: + return typing.Generic + return None + + def get_args(tp): + """Get type arguments with all substitutions performed. + + For unions, basic simplifications used by Union constructor are performed. + Examples:: + get_args(Dict[str, int]) == (str, int) + get_args(int) == () + get_args(Union[int, Union[T, int], str][int]) == (int, str) + get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int]) + get_args(Callable[[], T][int]) == ([], int) + """ + if isinstance(tp, _AnnotatedAlias): + return (tp.__origin__,) + tp.__metadata__ + if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias)): + if getattr(tp, "_special", False): + return () + res = tp.__args__ + if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis: + res = (list(res[:-1]), res[-1]) + return res + return () + + +# 3.10+ +if hasattr(typing, 'TypeAlias'): + TypeAlias = typing.TypeAlias +# 3.9 +elif sys.version_info[:2] >= (3, 9): + @_ExtensionsSpecialForm + def TypeAlias(self, parameters): + """Special marker indicating that an assignment should + be recognized as a proper type alias definition by type + checkers. + + For example:: + + Predicate: TypeAlias = Callable[..., bool] + + It's invalid when used anywhere except as in the example above. 
+ """ + raise TypeError(f"{self} is not subscriptable") +# 3.8 +else: + TypeAlias = _ExtensionsSpecialForm( + 'TypeAlias', + doc="""Special marker indicating that an assignment should + be recognized as a proper type alias definition by type + checkers. + + For example:: + + Predicate: TypeAlias = Callable[..., bool] + + It's invalid when used anywhere except as in the example + above.""" + ) + + +def _set_default(type_param, default): + if isinstance(default, (tuple, list)): + type_param.__default__ = tuple((typing._type_check(d, "Default must be a type") + for d in default)) + elif default != _marker: + if isinstance(type_param, ParamSpec) and default is ...: # ... not valid <3.11 + type_param.__default__ = default + else: + type_param.__default__ = typing._type_check(default, "Default must be a type") + else: + type_param.__default__ = None + + +def _set_module(typevarlike): + # for pickling: + def_mod = _caller(depth=3) + if def_mod != 'typing_extensions': + typevarlike.__module__ = def_mod + + +class _DefaultMixin: + """Mixin for TypeVarLike defaults.""" + + __slots__ = () + __init__ = _set_default + + +# Classes using this metaclass must provide a _backported_typevarlike ClassVar +class _TypeVarLikeMeta(type): + def __instancecheck__(cls, __instance: Any) -> bool: + return isinstance(__instance, cls._backported_typevarlike) + + +# Add default and infer_variance parameters from PEP 696 and 695 +class TypeVar(metaclass=_TypeVarLikeMeta): + """Type variable.""" + + _backported_typevarlike = typing.TypeVar + + def __new__(cls, name, *constraints, bound=None, + covariant=False, contravariant=False, + default=_marker, infer_variance=False): + if hasattr(typing, "TypeAliasType"): + # PEP 695 implemented (3.12+), can pass infer_variance to typing.TypeVar + typevar = typing.TypeVar(name, *constraints, bound=bound, + covariant=covariant, contravariant=contravariant, + infer_variance=infer_variance) + else: + typevar = typing.TypeVar(name, *constraints, bound=bound, + 
covariant=covariant, contravariant=contravariant) + if infer_variance and (covariant or contravariant): + raise ValueError("Variance cannot be specified with infer_variance.") + typevar.__infer_variance__ = infer_variance + _set_default(typevar, default) + _set_module(typevar) + return typevar + + def __init_subclass__(cls) -> None: + raise TypeError(f"type '{__name__}.TypeVar' is not an acceptable base type") + + +# Python 3.10+ has PEP 612 +if hasattr(typing, 'ParamSpecArgs'): + ParamSpecArgs = typing.ParamSpecArgs + ParamSpecKwargs = typing.ParamSpecKwargs +# 3.8-3.9 +else: + class _Immutable: + """Mixin to indicate that object should not be copied.""" + __slots__ = () + + def __copy__(self): + return self + + def __deepcopy__(self, memo): + return self + + class ParamSpecArgs(_Immutable): + """The args for a ParamSpec object. + + Given a ParamSpec object P, P.args is an instance of ParamSpecArgs. + + ParamSpecArgs objects have a reference back to their ParamSpec: + + P.args.__origin__ is P + + This type is meant for runtime introspection and has no special meaning to + static type checkers. + """ + def __init__(self, origin): + self.__origin__ = origin + + def __repr__(self): + return f"{self.__origin__.__name__}.args" + + def __eq__(self, other): + if not isinstance(other, ParamSpecArgs): + return NotImplemented + return self.__origin__ == other.__origin__ + + class ParamSpecKwargs(_Immutable): + """The kwargs for a ParamSpec object. + + Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs. + + ParamSpecKwargs objects have a reference back to their ParamSpec: + + P.kwargs.__origin__ is P + + This type is meant for runtime introspection and has no special meaning to + static type checkers. 
+ """ + def __init__(self, origin): + self.__origin__ = origin + + def __repr__(self): + return f"{self.__origin__.__name__}.kwargs" + + def __eq__(self, other): + if not isinstance(other, ParamSpecKwargs): + return NotImplemented + return self.__origin__ == other.__origin__ + +# 3.10+ +if hasattr(typing, 'ParamSpec'): + + # Add default parameter - PEP 696 + class ParamSpec(metaclass=_TypeVarLikeMeta): + """Parameter specification.""" + + _backported_typevarlike = typing.ParamSpec + + def __new__(cls, name, *, bound=None, + covariant=False, contravariant=False, + infer_variance=False, default=_marker): + if hasattr(typing, "TypeAliasType"): + # PEP 695 implemented, can pass infer_variance to typing.TypeVar + paramspec = typing.ParamSpec(name, bound=bound, + covariant=covariant, + contravariant=contravariant, + infer_variance=infer_variance) + else: + paramspec = typing.ParamSpec(name, bound=bound, + covariant=covariant, + contravariant=contravariant) + paramspec.__infer_variance__ = infer_variance + + _set_default(paramspec, default) + _set_module(paramspec) + return paramspec + + def __init_subclass__(cls) -> None: + raise TypeError(f"type '{__name__}.ParamSpec' is not an acceptable base type") + +# 3.8-3.9 +else: + + # Inherits from list as a workaround for Callable checks in Python < 3.9.2. + class ParamSpec(list, _DefaultMixin): + """Parameter specification variable. + + Usage:: + + P = ParamSpec('P') + + Parameter specification variables exist primarily for the benefit of static + type checkers. They are used to forward the parameter types of one + callable to another callable, a pattern commonly found in higher order + functions and decorators. They are only valid when used in ``Concatenate``, + or s the first argument to ``Callable``. In Python 3.10 and higher, + they are also supported in user-defined Generics at runtime. + See class Generic for more information on generic types. 
An + example for annotating a decorator:: + + T = TypeVar('T') + P = ParamSpec('P') + + def add_logging(f: Callable[P, T]) -> Callable[P, T]: + '''A type-safe decorator to add logging to a function.''' + def inner(*args: P.args, **kwargs: P.kwargs) -> T: + logging.info(f'{f.__name__} was called') + return f(*args, **kwargs) + return inner + + @add_logging + def add_two(x: float, y: float) -> float: + '''Add two numbers together.''' + return x + y + + Parameter specification variables defined with covariant=True or + contravariant=True can be used to declare covariant or contravariant + generic types. These keyword arguments are valid, but their actual semantics + are yet to be decided. See PEP 612 for details. + + Parameter specification variables can be introspected. e.g.: + + P.__name__ == 'T' + P.__bound__ == None + P.__covariant__ == False + P.__contravariant__ == False + + Note that only parameter specification variables defined in global scope can + be pickled. + """ + + # Trick Generic __parameters__. 
+ __class__ = typing.TypeVar + + @property + def args(self): + return ParamSpecArgs(self) + + @property + def kwargs(self): + return ParamSpecKwargs(self) + + def __init__(self, name, *, bound=None, covariant=False, contravariant=False, + infer_variance=False, default=_marker): + super().__init__([self]) + self.__name__ = name + self.__covariant__ = bool(covariant) + self.__contravariant__ = bool(contravariant) + self.__infer_variance__ = bool(infer_variance) + if bound: + self.__bound__ = typing._type_check(bound, 'Bound must be a type.') + else: + self.__bound__ = None + _DefaultMixin.__init__(self, default) + + # for pickling: + def_mod = _caller() + if def_mod != 'typing_extensions': + self.__module__ = def_mod + + def __repr__(self): + if self.__infer_variance__: + prefix = '' + elif self.__covariant__: + prefix = '+' + elif self.__contravariant__: + prefix = '-' + else: + prefix = '~' + return prefix + self.__name__ + + def __hash__(self): + return object.__hash__(self) + + def __eq__(self, other): + return self is other + + def __reduce__(self): + return self.__name__ + + # Hack to get typing._type_check to pass. + def __call__(self, *args, **kwargs): + pass + + +# 3.8-3.9 +if not hasattr(typing, 'Concatenate'): + # Inherits from list as a workaround for Callable checks in Python < 3.9.2. + class _ConcatenateGenericAlias(list): + + # Trick Generic into looking into this for __parameters__. + __class__ = typing._GenericAlias + + # Flag in 3.8. + _special = False + + def __init__(self, origin, args): + super().__init__(args) + self.__origin__ = origin + self.__args__ = args + + def __repr__(self): + _type_repr = typing._type_repr + return (f'{_type_repr(self.__origin__)}' + f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]') + + def __hash__(self): + return hash((self.__origin__, self.__args__)) + + # Hack to get typing._type_check to pass in Generic. 
+ def __call__(self, *args, **kwargs): + pass + + @property + def __parameters__(self): + return tuple( + tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec)) + ) + + +# 3.8-3.9 +@typing._tp_cache +def _concatenate_getitem(self, parameters): + if parameters == (): + raise TypeError("Cannot take a Concatenate of no types.") + if not isinstance(parameters, tuple): + parameters = (parameters,) + if not isinstance(parameters[-1], ParamSpec): + raise TypeError("The last parameter to Concatenate should be a " + "ParamSpec variable.") + msg = "Concatenate[arg, ...]: each arg must be a type." + parameters = tuple(typing._type_check(p, msg) for p in parameters) + return _ConcatenateGenericAlias(self, parameters) + + +# 3.10+ +if hasattr(typing, 'Concatenate'): + Concatenate = typing.Concatenate + _ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa: F811 +# 3.9 +elif sys.version_info[:2] >= (3, 9): + @_ExtensionsSpecialForm + def Concatenate(self, parameters): + """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a + higher order function which adds, removes or transforms parameters of a + callable. + + For example:: + + Callable[Concatenate[int, P], int] + + See PEP 612 for detailed information. + """ + return _concatenate_getitem(self, parameters) +# 3.8 +else: + class _ConcatenateForm(_ExtensionsSpecialForm, _root=True): + def __getitem__(self, parameters): + return _concatenate_getitem(self, parameters) + + Concatenate = _ConcatenateForm( + 'Concatenate', + doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a + higher order function which adds, removes or transforms parameters of a + callable. + + For example:: + + Callable[Concatenate[int, P], int] + + See PEP 612 for detailed information. 
+ """) + +# 3.10+ +if hasattr(typing, 'TypeGuard'): + TypeGuard = typing.TypeGuard +# 3.9 +elif sys.version_info[:2] >= (3, 9): + @_ExtensionsSpecialForm + def TypeGuard(self, parameters): + """Special typing form used to annotate the return type of a user-defined + type guard function. ``TypeGuard`` only accepts a single type argument. + At runtime, functions marked this way should return a boolean. + + ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static + type checkers to determine a more precise type of an expression within a + program's code flow. Usually type narrowing is done by analyzing + conditional code flow and applying the narrowing to a block of code. The + conditional expression here is sometimes referred to as a "type guard". + + Sometimes it would be convenient to use a user-defined boolean function + as a type guard. Such a function should use ``TypeGuard[...]`` as its + return type to alert static type checkers to this intention. + + Using ``-> TypeGuard`` tells the static type checker that for a given + function: + + 1. The return value is a boolean. + 2. If the return value is ``True``, the type of its argument + is the type inside ``TypeGuard``. + + For example:: + + def is_str(val: Union[str, float]): + # "isinstance" type guard + if isinstance(val, str): + # Type of ``val`` is narrowed to ``str`` + ... + else: + # Else, type of ``val`` is narrowed to ``float``. + ... + + Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower + form of ``TypeA`` (it can even be a wider form) and this may lead to + type-unsafe results. The main reason is to allow for things like + narrowing ``List[object]`` to ``List[str]`` even though the latter is not + a subtype of the former, since ``List`` is invariant. The responsibility of + writing type-safe type guards is left to the user. + + ``TypeGuard`` also works with type variables. For more information, see + PEP 647 (User-Defined Type Guards). 
+ """ + item = typing._type_check(parameters, f'{self} accepts only a single type.') + return typing._GenericAlias(self, (item,)) +# 3.8 +else: + class _TypeGuardForm(_ExtensionsSpecialForm, _root=True): + def __getitem__(self, parameters): + item = typing._type_check(parameters, + f'{self._name} accepts only a single type') + return typing._GenericAlias(self, (item,)) + + TypeGuard = _TypeGuardForm( + 'TypeGuard', + doc="""Special typing form used to annotate the return type of a user-defined + type guard function. ``TypeGuard`` only accepts a single type argument. + At runtime, functions marked this way should return a boolean. + + ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static + type checkers to determine a more precise type of an expression within a + program's code flow. Usually type narrowing is done by analyzing + conditional code flow and applying the narrowing to a block of code. The + conditional expression here is sometimes referred to as a "type guard". + + Sometimes it would be convenient to use a user-defined boolean function + as a type guard. Such a function should use ``TypeGuard[...]`` as its + return type to alert static type checkers to this intention. + + Using ``-> TypeGuard`` tells the static type checker that for a given + function: + + 1. The return value is a boolean. + 2. If the return value is ``True``, the type of its argument + is the type inside ``TypeGuard``. + + For example:: + + def is_str(val: Union[str, float]): + # "isinstance" type guard + if isinstance(val, str): + # Type of ``val`` is narrowed to ``str`` + ... + else: + # Else, type of ``val`` is narrowed to ``float``. + ... + + Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower + form of ``TypeA`` (it can even be a wider form) and this may lead to + type-unsafe results. 
The main reason is to allow for things like + narrowing ``List[object]`` to ``List[str]`` even though the latter is not + a subtype of the former, since ``List`` is invariant. The responsibility of + writing type-safe type guards is left to the user. + + ``TypeGuard`` also works with type variables. For more information, see + PEP 647 (User-Defined Type Guards). + """) + +# 3.13+ +if hasattr(typing, 'TypeIs'): + TypeIs = typing.TypeIs +# 3.9 +elif sys.version_info[:2] >= (3, 9): + @_ExtensionsSpecialForm + def TypeIs(self, parameters): + """Special typing form used to annotate the return type of a user-defined + type narrower function. ``TypeIs`` only accepts a single type argument. + At runtime, functions marked this way should return a boolean. + + ``TypeIs`` aims to benefit *type narrowing* -- a technique used by static + type checkers to determine a more precise type of an expression within a + program's code flow. Usually type narrowing is done by analyzing + conditional code flow and applying the narrowing to a block of code. The + conditional expression here is sometimes referred to as a "type guard". + + Sometimes it would be convenient to use a user-defined boolean function + as a type guard. Such a function should use ``TypeIs[...]`` as its + return type to alert static type checkers to this intention. + + Using ``-> TypeIs`` tells the static type checker that for a given + function: + + 1. The return value is a boolean. + 2. If the return value is ``True``, the type of its argument + is the intersection of the type inside ``TypeGuard`` and the argument's + previously known type. + + For example:: + + def is_awaitable(val: object) -> TypeIs[Awaitable[Any]]: + return hasattr(val, '__await__') + + def f(val: Union[int, Awaitable[int]]) -> int: + if is_awaitable(val): + assert_type(val, Awaitable[int]) + else: + assert_type(val, int) + + ``TypeIs`` also works with type variables. For more information, see + PEP 742 (Narrowing types with TypeIs). 
+ """ + item = typing._type_check(parameters, f'{self} accepts only a single type.') + return typing._GenericAlias(self, (item,)) +# 3.8 +else: + class _TypeIsForm(_ExtensionsSpecialForm, _root=True): + def __getitem__(self, parameters): + item = typing._type_check(parameters, + f'{self._name} accepts only a single type') + return typing._GenericAlias(self, (item,)) + + TypeIs = _TypeIsForm( + 'TypeIs', + doc="""Special typing form used to annotate the return type of a user-defined + type narrower function. ``TypeIs`` only accepts a single type argument. + At runtime, functions marked this way should return a boolean. + + ``TypeIs`` aims to benefit *type narrowing* -- a technique used by static + type checkers to determine a more precise type of an expression within a + program's code flow. Usually type narrowing is done by analyzing + conditional code flow and applying the narrowing to a block of code. The + conditional expression here is sometimes referred to as a "type guard". + + Sometimes it would be convenient to use a user-defined boolean function + as a type guard. Such a function should use ``TypeIs[...]`` as its + return type to alert static type checkers to this intention. + + Using ``-> TypeIs`` tells the static type checker that for a given + function: + + 1. The return value is a boolean. + 2. If the return value is ``True``, the type of its argument + is the intersection of the type inside ``TypeGuard`` and the argument's + previously known type. + + For example:: + + def is_awaitable(val: object) -> TypeIs[Awaitable[Any]]: + return hasattr(val, '__await__') + + def f(val: Union[int, Awaitable[int]]) -> int: + if is_awaitable(val): + assert_type(val, Awaitable[int]) + else: + assert_type(val, int) + + ``TypeIs`` also works with type variables. For more information, see + PEP 742 (Narrowing types with TypeIs). 
+ """) + + +# Vendored from cpython typing._SpecialFrom +class _SpecialForm(typing._Final, _root=True): + __slots__ = ('_name', '__doc__', '_getitem') + + def __init__(self, getitem): + self._getitem = getitem + self._name = getitem.__name__ + self.__doc__ = getitem.__doc__ + + def __getattr__(self, item): + if item in {'__name__', '__qualname__'}: + return self._name + + raise AttributeError(item) + + def __mro_entries__(self, bases): + raise TypeError(f"Cannot subclass {self!r}") + + def __repr__(self): + return f'typing_extensions.{self._name}' + + def __reduce__(self): + return self._name + + def __call__(self, *args, **kwds): + raise TypeError(f"Cannot instantiate {self!r}") + + def __or__(self, other): + return typing.Union[self, other] + + def __ror__(self, other): + return typing.Union[other, self] + + def __instancecheck__(self, obj): + raise TypeError(f"{self} cannot be used with isinstance()") + + def __subclasscheck__(self, cls): + raise TypeError(f"{self} cannot be used with issubclass()") + + @typing._tp_cache + def __getitem__(self, parameters): + return self._getitem(self, parameters) + + +if hasattr(typing, "LiteralString"): # 3.11+ + LiteralString = typing.LiteralString +else: + @_SpecialForm + def LiteralString(self, params): + """Represents an arbitrary literal string. + + Example:: + + from typing_extensions import LiteralString + + def query(sql: LiteralString) -> ...: + ... + + query("SELECT * FROM table") # ok + query(f"SELECT * FROM {input()}") # not ok + + See PEP 675 for details. + + """ + raise TypeError(f"{self} is not subscriptable") + + +if hasattr(typing, "Self"): # 3.11+ + Self = typing.Self +else: + @_SpecialForm + def Self(self, params): + """Used to spell the type of "self" in classes. + + Example:: + + from typing import Self + + class ReturnsSelf: + def parse(self, data: bytes) -> Self: + ... 
+ return self + + """ + + raise TypeError(f"{self} is not subscriptable") + + +if hasattr(typing, "Never"): # 3.11+ + Never = typing.Never +else: + @_SpecialForm + def Never(self, params): + """The bottom type, a type that has no members. + + This can be used to define a function that should never be + called, or a function that never returns:: + + from typing_extensions import Never + + def never_call_me(arg: Never) -> None: + pass + + def int_or_str(arg: int | str) -> None: + never_call_me(arg) # type checker error + match arg: + case int(): + print("It's an int") + case str(): + print("It's a str") + case _: + never_call_me(arg) # ok, arg is of type Never + + """ + + raise TypeError(f"{self} is not subscriptable") + + +if hasattr(typing, 'Required'): # 3.11+ + Required = typing.Required + NotRequired = typing.NotRequired +elif sys.version_info[:2] >= (3, 9): # 3.9-3.10 + @_ExtensionsSpecialForm + def Required(self, parameters): + """A special typing construct to mark a key of a total=False TypedDict + as required. For example: + + class Movie(TypedDict, total=False): + title: Required[str] + year: int + + m = Movie( + title='The Matrix', # typechecker error if key is omitted + year=1999, + ) + + There is no runtime checking that a required key is actually provided + when instantiating a related TypedDict. + """ + item = typing._type_check(parameters, f'{self._name} accepts only a single type.') + return typing._GenericAlias(self, (item,)) + + @_ExtensionsSpecialForm + def NotRequired(self, parameters): + """A special typing construct to mark a key of a TypedDict as + potentially missing. 
For example: + + class Movie(TypedDict): + title: str + year: NotRequired[int] + + m = Movie( + title='The Matrix', # typechecker error if key is omitted + year=1999, + ) + """ + item = typing._type_check(parameters, f'{self._name} accepts only a single type.') + return typing._GenericAlias(self, (item,)) + +else: # 3.8 + class _RequiredForm(_ExtensionsSpecialForm, _root=True): + def __getitem__(self, parameters): + item = typing._type_check(parameters, + f'{self._name} accepts only a single type.') + return typing._GenericAlias(self, (item,)) + + Required = _RequiredForm( + 'Required', + doc="""A special typing construct to mark a key of a total=False TypedDict + as required. For example: + + class Movie(TypedDict, total=False): + title: Required[str] + year: int + + m = Movie( + title='The Matrix', # typechecker error if key is omitted + year=1999, + ) + + There is no runtime checking that a required key is actually provided + when instantiating a related TypedDict. + """) + NotRequired = _RequiredForm( + 'NotRequired', + doc="""A special typing construct to mark a key of a TypedDict as + potentially missing. For example: + + class Movie(TypedDict): + title: str + year: NotRequired[int] + + m = Movie( + title='The Matrix', # typechecker error if key is omitted + year=1999, + ) + """) + + +if hasattr(typing, 'ReadOnly'): + ReadOnly = typing.ReadOnly +elif sys.version_info[:2] >= (3, 9): # 3.9-3.12 + @_ExtensionsSpecialForm + def ReadOnly(self, parameters): + """A special typing construct to mark an item of a TypedDict as read-only. + + For example: + + class Movie(TypedDict): + title: ReadOnly[str] + year: int + + def mutate_movie(m: Movie) -> None: + m["year"] = 1992 # allowed + m["title"] = "The Matrix" # typechecker error + + There is no runtime checking for this property. 
+ """ + item = typing._type_check(parameters, f'{self._name} accepts only a single type.') + return typing._GenericAlias(self, (item,)) + +else: # 3.8 + class _ReadOnlyForm(_ExtensionsSpecialForm, _root=True): + def __getitem__(self, parameters): + item = typing._type_check(parameters, + f'{self._name} accepts only a single type.') + return typing._GenericAlias(self, (item,)) + + ReadOnly = _ReadOnlyForm( + 'ReadOnly', + doc="""A special typing construct to mark a key of a TypedDict as read-only. + + For example: + + class Movie(TypedDict): + title: ReadOnly[str] + year: int + + def mutate_movie(m: Movie) -> None: + m["year"] = 1992 # allowed + m["title"] = "The Matrix" # typechecker error + + There is no runtime checking for this propery. + """) + + +_UNPACK_DOC = """\ +Type unpack operator. + +The type unpack operator takes the child types from some container type, +such as `tuple[int, str]` or a `TypeVarTuple`, and 'pulls them out'. For +example: + + # For some generic class `Foo`: + Foo[Unpack[tuple[int, str]]] # Equivalent to Foo[int, str] + + Ts = TypeVarTuple('Ts') + # Specifies that `Bar` is generic in an arbitrary number of types. + # (Think of `Ts` as a tuple of an arbitrary number of individual + # `TypeVar`s, which the `Unpack` is 'pulling out' directly into the + # `Generic[]`.) + class Bar(Generic[Unpack[Ts]]): ... + Bar[int] # Valid + Bar[int, str] # Also valid + +From Python 3.11, this can also be done using the `*` operator: + + Foo[*tuple[int, str]] + class Bar(Generic[*Ts]): ... + +The operator can also be used along with a `TypedDict` to annotate +`**kwargs` in a function signature. For instance: + + class Movie(TypedDict): + name: str + year: int + + # This function expects two keyword arguments - *name* of type `str` and + # *year* of type `int`. + def foo(**kwargs: Unpack[Movie]): ... + +Note that there is only some runtime checking of this operator. Not +everything the runtime allows may be accepted by static type checkers. 
+ +For more information, see PEP 646 and PEP 692. +""" + + +if sys.version_info >= (3, 12): # PEP 692 changed the repr of Unpack[] + Unpack = typing.Unpack + + def _is_unpack(obj): + return get_origin(obj) is Unpack + +elif sys.version_info[:2] >= (3, 9): # 3.9+ + class _UnpackSpecialForm(_ExtensionsSpecialForm, _root=True): + def __init__(self, getitem): + super().__init__(getitem) + self.__doc__ = _UNPACK_DOC + + class _UnpackAlias(typing._GenericAlias, _root=True): + __class__ = typing.TypeVar + + @_UnpackSpecialForm + def Unpack(self, parameters): + item = typing._type_check(parameters, f'{self._name} accepts only a single type.') + return _UnpackAlias(self, (item,)) + + def _is_unpack(obj): + return isinstance(obj, _UnpackAlias) + +else: # 3.8 + class _UnpackAlias(typing._GenericAlias, _root=True): + __class__ = typing.TypeVar + + class _UnpackForm(_ExtensionsSpecialForm, _root=True): + def __getitem__(self, parameters): + item = typing._type_check(parameters, + f'{self._name} accepts only a single type.') + return _UnpackAlias(self, (item,)) + + Unpack = _UnpackForm('Unpack', doc=_UNPACK_DOC) + + def _is_unpack(obj): + return isinstance(obj, _UnpackAlias) + + +if hasattr(typing, "TypeVarTuple"): # 3.11+ + + # Add default parameter - PEP 696 + class TypeVarTuple(metaclass=_TypeVarLikeMeta): + """Type variable tuple.""" + + _backported_typevarlike = typing.TypeVarTuple + + def __new__(cls, name, *, default=_marker): + tvt = typing.TypeVarTuple(name) + _set_default(tvt, default) + _set_module(tvt) + return tvt + + def __init_subclass__(self, *args, **kwds): + raise TypeError("Cannot subclass special typing classes") + +else: # <=3.10 + class TypeVarTuple(_DefaultMixin): + """Type variable tuple. + + Usage:: + + Ts = TypeVarTuple('Ts') + + In the same way that a normal type variable is a stand-in for a single + type such as ``int``, a type variable *tuple* is a stand-in for a *tuple* + type such as ``Tuple[int, str]``. 
+ + Type variable tuples can be used in ``Generic`` declarations. + Consider the following example:: + + class Array(Generic[*Ts]): ... + + The ``Ts`` type variable tuple here behaves like ``tuple[T1, T2]``, + where ``T1`` and ``T2`` are type variables. To use these type variables + as type parameters of ``Array``, we must *unpack* the type variable tuple using + the star operator: ``*Ts``. The signature of ``Array`` then behaves + as if we had simply written ``class Array(Generic[T1, T2]): ...``. + In contrast to ``Generic[T1, T2]``, however, ``Generic[*Shape]`` allows + us to parameterise the class with an *arbitrary* number of type parameters. + + Type variable tuples can be used anywhere a normal ``TypeVar`` can. + This includes class definitions, as shown above, as well as function + signatures and variable annotations:: + + class Array(Generic[*Ts]): + + def __init__(self, shape: Tuple[*Ts]): + self._shape: Tuple[*Ts] = shape + + def get_shape(self) -> Tuple[*Ts]: + return self._shape + + shape = (Height(480), Width(640)) + x: Array[Height, Width] = Array(shape) + y = abs(x) # Inferred type is Array[Height, Width] + z = x + x # ... is Array[Height, Width] + x.get_shape() # ... is tuple[Height, Width] + + """ + + # Trick Generic __parameters__. 
+ __class__ = typing.TypeVar + + def __iter__(self): + yield self.__unpacked__ + + def __init__(self, name, *, default=_marker): + self.__name__ = name + _DefaultMixin.__init__(self, default) + + # for pickling: + def_mod = _caller() + if def_mod != 'typing_extensions': + self.__module__ = def_mod + + self.__unpacked__ = Unpack[self] + + def __repr__(self): + return self.__name__ + + def __hash__(self): + return object.__hash__(self) + + def __eq__(self, other): + return self is other + + def __reduce__(self): + return self.__name__ + + def __init_subclass__(self, *args, **kwds): + if '_root' not in kwds: + raise TypeError("Cannot subclass special typing classes") + + +if hasattr(typing, "reveal_type"): # 3.11+ + reveal_type = typing.reveal_type +else: # <=3.10 + def reveal_type(obj: T, /) -> T: + """Reveal the inferred type of a variable. + + When a static type checker encounters a call to ``reveal_type()``, + it will emit the inferred type of the argument:: + + x: int = 1 + reveal_type(x) + + Running a static type checker (e.g., ``mypy``) on this example + will produce output similar to 'Revealed type is "builtins.int"'. + + At runtime, the function prints the runtime type of the + argument and returns it unchanged. + + """ + print(f"Runtime type is {type(obj).__name__!r}", file=sys.stderr) + return obj + + +if hasattr(typing, "assert_never"): # 3.11+ + assert_never = typing.assert_never +else: # <=3.10 + def assert_never(arg: Never, /) -> Never: + """Assert to the type checker that a line of code is unreachable. + + Example:: + + def int_or_str(arg: int | str) -> None: + match arg: + case int(): + print("It's an int") + case str(): + print("It's a str") + case _: + assert_never(arg) + + If a type checker finds that a call to assert_never() is + reachable, it will emit an error. + + At runtime, this throws an exception when called. 
+ + """ + raise AssertionError("Expected code to be unreachable") + + +if sys.version_info >= (3, 12): # 3.12+ + # dataclass_transform exists in 3.11 but lacks the frozen_default parameter + dataclass_transform = typing.dataclass_transform +else: # <=3.11 + def dataclass_transform( + *, + eq_default: bool = True, + order_default: bool = False, + kw_only_default: bool = False, + frozen_default: bool = False, + field_specifiers: typing.Tuple[ + typing.Union[typing.Type[typing.Any], typing.Callable[..., typing.Any]], + ... + ] = (), + **kwargs: typing.Any, + ) -> typing.Callable[[T], T]: + """Decorator that marks a function, class, or metaclass as providing + dataclass-like behavior. + + Example: + + from typing_extensions import dataclass_transform + + _T = TypeVar("_T") + + # Used on a decorator function + @dataclass_transform() + def create_model(cls: type[_T]) -> type[_T]: + ... + return cls + + @create_model + class CustomerModel: + id: int + name: str + + # Used on a base class + @dataclass_transform() + class ModelBase: ... + + class CustomerModel(ModelBase): + id: int + name: str + + # Used on a metaclass + @dataclass_transform() + class ModelMeta(type): ... + + class ModelBase(metaclass=ModelMeta): ... + + class CustomerModel(ModelBase): + id: int + name: str + + Each of the ``CustomerModel`` classes defined in this example will now + behave similarly to a dataclass created with the ``@dataclasses.dataclass`` + decorator. For example, the type checker will synthesize an ``__init__`` + method. + + The arguments to this decorator can be used to customize this behavior: + - ``eq_default`` indicates whether the ``eq`` parameter is assumed to be + True or False if it is omitted by the caller. + - ``order_default`` indicates whether the ``order`` parameter is + assumed to be True or False if it is omitted by the caller. + - ``kw_only_default`` indicates whether the ``kw_only`` parameter is + assumed to be True or False if it is omitted by the caller. 
+ - ``frozen_default`` indicates whether the ``frozen`` parameter is + assumed to be True or False if it is omitted by the caller. + - ``field_specifiers`` specifies a static list of supported classes + or functions that describe fields, similar to ``dataclasses.field()``. + + At runtime, this decorator records its arguments in the + ``__dataclass_transform__`` attribute on the decorated object. + + See PEP 681 for details. + + """ + def decorator(cls_or_fn): + cls_or_fn.__dataclass_transform__ = { + "eq_default": eq_default, + "order_default": order_default, + "kw_only_default": kw_only_default, + "frozen_default": frozen_default, + "field_specifiers": field_specifiers, + "kwargs": kwargs, + } + return cls_or_fn + return decorator + + +if hasattr(typing, "override"): # 3.12+ + override = typing.override +else: # <=3.11 + _F = typing.TypeVar("_F", bound=typing.Callable[..., typing.Any]) + + def override(arg: _F, /) -> _F: + """Indicate that a method is intended to override a method in a base class. + + Usage: + + class Base: + def method(self) -> None: + pass + + class Child(Base): + @override + def method(self) -> None: + super().method() + + When this decorator is applied to a method, the type checker will + validate that it overrides a method with the same name on a base class. + This helps prevent bugs that may occur when a base class is changed + without an equivalent change to a child class. + + There is no runtime checking of these properties. The decorator + sets the ``__override__`` attribute to ``True`` on the decorated object + to allow runtime introspection. + + See PEP 698 for details. + + """ + try: + arg.__override__ = True + except (AttributeError, TypeError): + # Skip the attribute silently if it is not writable. + # AttributeError happens if the object has __slots__ or a + # read-only property, TypeError if it's a builtin class. 
+ pass + return arg + + +if hasattr(warnings, "deprecated"): + deprecated = warnings.deprecated +else: + _T = typing.TypeVar("_T") + + class deprecated: + """Indicate that a class, function or overload is deprecated. + + When this decorator is applied to an object, the type checker + will generate a diagnostic on usage of the deprecated object. + + Usage: + + @deprecated("Use B instead") + class A: + pass + + @deprecated("Use g instead") + def f(): + pass + + @overload + @deprecated("int support is deprecated") + def g(x: int) -> int: ... + @overload + def g(x: str) -> int: ... + + The warning specified by *category* will be emitted at runtime + on use of deprecated objects. For functions, that happens on calls; + for classes, on instantiation and on creation of subclasses. + If the *category* is ``None``, no warning is emitted at runtime. + The *stacklevel* determines where the + warning is emitted. If it is ``1`` (the default), the warning + is emitted at the direct caller of the deprecated object; if it + is higher, it is emitted further up the stack. + Static type checker behavior is not affected by the *category* + and *stacklevel* arguments. + + The deprecation message passed to the decorator is saved in the + ``__deprecated__`` attribute on the decorated object. + If applied to an overload, the decorator + must be after the ``@overload`` decorator for the attribute to + exist on the overload as returned by ``get_overloads()``. + + See PEP 702 for details. 
+ + """ + def __init__( + self, + message: str, + /, + *, + category: typing.Optional[typing.Type[Warning]] = DeprecationWarning, + stacklevel: int = 1, + ) -> None: + if not isinstance(message, str): + raise TypeError( + "Expected an object of type str for 'message', not " + f"{type(message).__name__!r}" + ) + self.message = message + self.category = category + self.stacklevel = stacklevel + + def __call__(self, arg: _T, /) -> _T: + # Make sure the inner functions created below don't + # retain a reference to self. + msg = self.message + category = self.category + stacklevel = self.stacklevel + if category is None: + arg.__deprecated__ = msg + return arg + elif isinstance(arg, type): + import functools + from types import MethodType + + original_new = arg.__new__ + + @functools.wraps(original_new) + def __new__(cls, *args, **kwargs): + if cls is arg: + warnings.warn(msg, category=category, stacklevel=stacklevel + 1) + if original_new is not object.__new__: + return original_new(cls, *args, **kwargs) + # Mirrors a similar check in object.__new__. + elif cls.__init__ is object.__init__ and (args or kwargs): + raise TypeError(f"{cls.__name__}() takes no arguments") + else: + return original_new(cls) + + arg.__new__ = staticmethod(__new__) + + original_init_subclass = arg.__init_subclass__ + # We need slightly different behavior if __init_subclass__ + # is a bound method (likely if it was implemented in Python) + if isinstance(original_init_subclass, MethodType): + original_init_subclass = original_init_subclass.__func__ + + @functools.wraps(original_init_subclass) + def __init_subclass__(*args, **kwargs): + warnings.warn(msg, category=category, stacklevel=stacklevel + 1) + return original_init_subclass(*args, **kwargs) + + arg.__init_subclass__ = classmethod(__init_subclass__) + # Or otherwise, which likely means it's a builtin such as + # object's implementation of __init_subclass__. 
+ else: + @functools.wraps(original_init_subclass) + def __init_subclass__(*args, **kwargs): + warnings.warn(msg, category=category, stacklevel=stacklevel + 1) + return original_init_subclass(*args, **kwargs) + + arg.__init_subclass__ = __init_subclass__ + + arg.__deprecated__ = __new__.__deprecated__ = msg + __init_subclass__.__deprecated__ = msg + return arg + elif callable(arg): + import functools + + @functools.wraps(arg) + def wrapper(*args, **kwargs): + warnings.warn(msg, category=category, stacklevel=stacklevel + 1) + return arg(*args, **kwargs) + + arg.__deprecated__ = wrapper.__deprecated__ = msg + return wrapper + else: + raise TypeError( + "@deprecated decorator with non-None category must be applied to " + f"a class or callable, not {arg!r}" + ) + + +# We have to do some monkey patching to deal with the dual nature of +# Unpack/TypeVarTuple: +# - We want Unpack to be a kind of TypeVar so it gets accepted in +# Generic[Unpack[Ts]] +# - We want it to *not* be treated as a TypeVar for the purposes of +# counting generic parameters, so that when we subscript a generic, +# the runtime doesn't try to substitute the Unpack with the subscripted type. +if not hasattr(typing, "TypeVarTuple"): + def _check_generic(cls, parameters, elen=_marker): + """Check correct count for parameters of a generic cls (internal helper). + + This gives a nice error message in case of count mismatch. 
+ """ + if not elen: + raise TypeError(f"{cls} is not a generic class") + if elen is _marker: + if not hasattr(cls, "__parameters__") or not cls.__parameters__: + raise TypeError(f"{cls} is not a generic class") + elen = len(cls.__parameters__) + alen = len(parameters) + if alen != elen: + expect_val = elen + if hasattr(cls, "__parameters__"): + parameters = [p for p in cls.__parameters__ if not _is_unpack(p)] + num_tv_tuples = sum(isinstance(p, TypeVarTuple) for p in parameters) + if (num_tv_tuples > 0) and (alen >= elen - num_tv_tuples): + return + + # deal with TypeVarLike defaults + # required TypeVarLikes cannot appear after a defaulted one. + if alen < elen: + # since we validate TypeVarLike default in _collect_type_vars + # or _collect_parameters we can safely check parameters[alen] + if getattr(parameters[alen], '__default__', None) is not None: + return + + num_default_tv = sum(getattr(p, '__default__', None) + is not None for p in parameters) + + elen -= num_default_tv + + expect_val = f"at least {elen}" + + things = "arguments" if sys.version_info >= (3, 10) else "parameters" + raise TypeError(f"Too {'many' if alen > elen else 'few'} {things}" + f" for {cls}; actual {alen}, expected {expect_val}") +else: + # Python 3.11+ + + def _check_generic(cls, parameters, elen): + """Check correct count for parameters of a generic cls (internal helper). + + This gives a nice error message in case of count mismatch. + """ + if not elen: + raise TypeError(f"{cls} is not a generic class") + alen = len(parameters) + if alen != elen: + expect_val = elen + if hasattr(cls, "__parameters__"): + parameters = [p for p in cls.__parameters__ if not _is_unpack(p)] + + # deal with TypeVarLike defaults + # required TypeVarLikes cannot appear after a defaulted one. 
+ if alen < elen: + # since we validate TypeVarLike default in _collect_type_vars + # or _collect_parameters we can safely check parameters[alen] + if getattr(parameters[alen], '__default__', None) is not None: + return + + num_default_tv = sum(getattr(p, '__default__', None) + is not None for p in parameters) + + elen -= num_default_tv + + expect_val = f"at least {elen}" + + raise TypeError(f"Too {'many' if alen > elen else 'few'} arguments" + f" for {cls}; actual {alen}, expected {expect_val}") + +typing._check_generic = _check_generic + +# Python 3.11+ _collect_type_vars was renamed to _collect_parameters +if hasattr(typing, '_collect_type_vars'): + def _collect_type_vars(types, typevar_types=None): + """Collect all type variable contained in types in order of + first appearance (lexicographic order). For example:: + + _collect_type_vars((T, List[S, T])) == (T, S) + """ + if typevar_types is None: + typevar_types = typing.TypeVar + tvars = [] + # required TypeVarLike cannot appear after TypeVarLike with default + default_encountered = False + for t in types: + if ( + isinstance(t, typevar_types) and + t not in tvars and + not _is_unpack(t) + ): + if getattr(t, '__default__', None) is not None: + default_encountered = True + elif default_encountered: + raise TypeError(f'Type parameter {t!r} without a default' + ' follows type parameter with a default') + + tvars.append(t) + if _should_collect_from_parameters(t): + tvars.extend([t for t in t.__parameters__ if t not in tvars]) + return tuple(tvars) + + typing._collect_type_vars = _collect_type_vars +else: + def _collect_parameters(args): + """Collect all type variables and parameter specifications in args + in order of first appearance (lexicographic order). 
+ + For example:: + + assert _collect_parameters((T, Callable[P, T])) == (T, P) + """ + parameters = [] + # required TypeVarLike cannot appear after TypeVarLike with default + default_encountered = False + for t in args: + if isinstance(t, type): + # We don't want __parameters__ descriptor of a bare Python class. + pass + elif isinstance(t, tuple): + # `t` might be a tuple, when `ParamSpec` is substituted with + # `[T, int]`, or `[int, *Ts]`, etc. + for x in t: + for collected in _collect_parameters([x]): + if collected not in parameters: + parameters.append(collected) + elif hasattr(t, '__typing_subst__'): + if t not in parameters: + if getattr(t, '__default__', None) is not None: + default_encountered = True + elif default_encountered: + raise TypeError(f'Type parameter {t!r} without a default' + ' follows type parameter with a default') + + parameters.append(t) + else: + for x in getattr(t, '__parameters__', ()): + if x not in parameters: + parameters.append(x) + + return tuple(parameters) + + typing._collect_parameters = _collect_parameters + +# Backport typing.NamedTuple as it exists in Python 3.13. +# In 3.11, the ability to define generic `NamedTuple`s was supported. +# This was explicitly disallowed in 3.9-3.10, and only half-worked in <=3.8. 
+# On 3.12, we added __orig_bases__ to call-based NamedTuples +# On 3.13, we deprecated kwargs-based NamedTuples +if sys.version_info >= (3, 13): + NamedTuple = typing.NamedTuple +else: + def _make_nmtuple(name, types, module, defaults=()): + fields = [n for n, t in types] + annotations = {n: typing._type_check(t, f"field {n} annotation must be a type") + for n, t in types} + nm_tpl = collections.namedtuple(name, fields, + defaults=defaults, module=module) + nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = annotations + # The `_field_types` attribute was removed in 3.9; + # in earlier versions, it is the same as the `__annotations__` attribute + if sys.version_info < (3, 9): + nm_tpl._field_types = annotations + return nm_tpl + + _prohibited_namedtuple_fields = typing._prohibited + _special_namedtuple_fields = frozenset({'__module__', '__name__', '__annotations__'}) + + class _NamedTupleMeta(type): + def __new__(cls, typename, bases, ns): + assert _NamedTuple in bases + for base in bases: + if base is not _NamedTuple and base is not typing.Generic: + raise TypeError( + 'can only inherit from a NamedTuple type and Generic') + bases = tuple(tuple if base is _NamedTuple else base for base in bases) + types = ns.get('__annotations__', {}) + default_names = [] + for field_name in types: + if field_name in ns: + default_names.append(field_name) + elif default_names: + raise TypeError(f"Non-default namedtuple field {field_name} " + f"cannot follow default field" + f"{'s' if len(default_names) > 1 else ''} " + f"{', '.join(default_names)}") + nm_tpl = _make_nmtuple( + typename, types.items(), + defaults=[ns[n] for n in default_names], + module=ns['__module__'] + ) + nm_tpl.__bases__ = bases + if typing.Generic in bases: + if hasattr(typing, '_generic_class_getitem'): # 3.12+ + nm_tpl.__class_getitem__ = classmethod(typing._generic_class_getitem) + else: + class_getitem = typing.Generic.__class_getitem__.__func__ + nm_tpl.__class_getitem__ = 
classmethod(class_getitem) + # update from user namespace without overriding special namedtuple attributes + for key, val in ns.items(): + if key in _prohibited_namedtuple_fields: + raise AttributeError("Cannot overwrite NamedTuple attribute " + key) + elif key not in _special_namedtuple_fields: + if key not in nm_tpl._fields: + setattr(nm_tpl, key, ns[key]) + try: + set_name = type(val).__set_name__ + except AttributeError: + pass + else: + try: + set_name(val, nm_tpl, key) + except BaseException as e: + msg = ( + f"Error calling __set_name__ on {type(val).__name__!r} " + f"instance {key!r} in {typename!r}" + ) + # BaseException.add_note() existed on py311, + # but the __set_name__ machinery didn't start + # using add_note() until py312. + # Making sure exceptions are raised in the same way + # as in "normal" classes seems most important here. + if sys.version_info >= (3, 12): + e.add_note(msg) + raise + else: + raise RuntimeError(msg) from e + + if typing.Generic in bases: + nm_tpl.__init_subclass__() + return nm_tpl + + _NamedTuple = type.__new__(_NamedTupleMeta, 'NamedTuple', (), {}) + + def _namedtuple_mro_entries(bases): + assert NamedTuple in bases + return (_NamedTuple,) + + @_ensure_subclassable(_namedtuple_mro_entries) + def NamedTuple(typename, fields=_marker, /, **kwargs): + """Typed version of namedtuple. + + Usage:: + + class Employee(NamedTuple): + name: str + id: int + + This is equivalent to:: + + Employee = collections.namedtuple('Employee', ['name', 'id']) + + The resulting class has an extra __annotations__ attribute, giving a + dict that maps field names to types. (The field names are also in + the _fields attribute, which is part of the namedtuple API.) 
+ An alternative equivalent functional syntax is also accepted:: + + Employee = NamedTuple('Employee', [('name', str), ('id', int)]) + """ + if fields is _marker: + if kwargs: + deprecated_thing = "Creating NamedTuple classes using keyword arguments" + deprecation_msg = ( + "{name} is deprecated and will be disallowed in Python {remove}. " + "Use the class-based or functional syntax instead." + ) + else: + deprecated_thing = "Failing to pass a value for the 'fields' parameter" + example = f"`{typename} = NamedTuple({typename!r}, [])`" + deprecation_msg = ( + "{name} is deprecated and will be disallowed in Python {remove}. " + "To create a NamedTuple class with 0 fields " + "using the functional syntax, " + "pass an empty list, e.g. " + ) + example + "." + elif fields is None: + if kwargs: + raise TypeError( + "Cannot pass `None` as the 'fields' parameter " + "and also specify fields using keyword arguments" + ) + else: + deprecated_thing = "Passing `None` as the 'fields' parameter" + example = f"`{typename} = NamedTuple({typename!r}, [])`" + deprecation_msg = ( + "{name} is deprecated and will be disallowed in Python {remove}. " + "To create a NamedTuple class with 0 fields " + "using the functional syntax, " + "pass an empty list, e.g. " + ) + example + "." + elif kwargs: + raise TypeError("Either list of fields or keywords" + " can be provided to NamedTuple, not both") + if fields is _marker or fields is None: + warnings.warn( + deprecation_msg.format(name=deprecated_thing, remove="3.15"), + DeprecationWarning, + stacklevel=2, + ) + fields = kwargs.items() + nt = _make_nmtuple(typename, fields, module=_caller()) + nt.__orig_bases__ = (NamedTuple,) + return nt + + +if hasattr(collections.abc, "Buffer"): + Buffer = collections.abc.Buffer +else: + class Buffer(abc.ABC): + """Base class for classes that implement the buffer protocol. + + The buffer protocol allows Python objects to expose a low-level + memory buffer interface. 
Before Python 3.12, it is not possible + to implement the buffer protocol in pure Python code, or even + to check whether a class implements the buffer protocol. In + Python 3.12 and higher, the ``__buffer__`` method allows access + to the buffer protocol from Python code, and the + ``collections.abc.Buffer`` ABC allows checking whether a class + implements the buffer protocol. + + To indicate support for the buffer protocol in earlier versions, + inherit from this ABC, either in a stub file or at runtime, + or use ABC registration. This ABC provides no methods, because + there is no Python-accessible methods shared by pre-3.12 buffer + classes. It is useful primarily for static checks. + + """ + + # As a courtesy, register the most common stdlib buffer classes. + Buffer.register(memoryview) + Buffer.register(bytearray) + Buffer.register(bytes) + + +# Backport of types.get_original_bases, available on 3.12+ in CPython +if hasattr(_types, "get_original_bases"): + get_original_bases = _types.get_original_bases +else: + def get_original_bases(cls, /): + """Return the class's "original" bases prior to modification by `__mro_entries__`. + + Examples:: + + from typing import TypeVar, Generic + from typing_extensions import NamedTuple, TypedDict + + T = TypeVar("T") + class Foo(Generic[T]): ... + class Bar(Foo[int], float): ... + class Baz(list[str]): ... 
+ Eggs = NamedTuple("Eggs", [("a", int), ("b", str)]) + Spam = TypedDict("Spam", {"a": int, "b": str}) + + assert get_original_bases(Bar) == (Foo[int], float) + assert get_original_bases(Baz) == (list[str],) + assert get_original_bases(Eggs) == (NamedTuple,) + assert get_original_bases(Spam) == (TypedDict,) + assert get_original_bases(int) == (object,) + """ + try: + return cls.__dict__.get("__orig_bases__", cls.__bases__) + except AttributeError: + raise TypeError( + f'Expected an instance of type, not {type(cls).__name__!r}' + ) from None + + +# NewType is a class on Python 3.10+, making it pickleable +# The error message for subclassing instances of NewType was improved on 3.11+ +if sys.version_info >= (3, 11): + NewType = typing.NewType +else: + class NewType: + """NewType creates simple unique types with almost zero + runtime overhead. NewType(name, tp) is considered a subtype of tp + by static type checkers. At runtime, NewType(name, tp) returns + a dummy callable that simply returns its argument. Usage:: + UserId = NewType('UserId', int) + def name_by_id(user_id: UserId) -> str: + ... + UserId('user') # Fails type check + name_by_id(42) # Fails type check + name_by_id(UserId(42)) # OK + num = UserId(5) + 1 # type: int + """ + + def __call__(self, obj, /): + return obj + + def __init__(self, name, tp): + self.__qualname__ = name + if '.' in name: + name = name.rpartition('.')[-1] + self.__name__ = name + self.__supertype__ = tp + def_mod = _caller() + if def_mod != 'typing_extensions': + self.__module__ = def_mod + + def __mro_entries__(self, bases): + # We defined __mro_entries__ to get a better error message + # if a user attempts to subclass a NewType instance. bpo-46170 + supercls_name = self.__name__ + + class Dummy: + def __init_subclass__(cls): + subcls_name = cls.__name__ + raise TypeError( + f"Cannot subclass an instance of NewType. 
" + f"Perhaps you were looking for: " + f"`{subcls_name} = NewType({subcls_name!r}, {supercls_name})`" + ) + + return (Dummy,) + + def __repr__(self): + return f'{self.__module__}.{self.__qualname__}' + + def __reduce__(self): + return self.__qualname__ + + if sys.version_info >= (3, 10): + # PEP 604 methods + # It doesn't make sense to have these methods on Python <3.10 + + def __or__(self, other): + return typing.Union[self, other] + + def __ror__(self, other): + return typing.Union[other, self] + + +if hasattr(typing, "TypeAliasType"): + TypeAliasType = typing.TypeAliasType +else: + def _is_unionable(obj): + """Corresponds to is_unionable() in unionobject.c in CPython.""" + return obj is None or isinstance(obj, ( + type, + _types.GenericAlias, + _types.UnionType, + TypeAliasType, + )) + + class TypeAliasType: + """Create named, parameterized type aliases. + + This provides a backport of the new `type` statement in Python 3.12: + + type ListOrSet[T] = list[T] | set[T] + + is equivalent to: + + T = TypeVar("T") + ListOrSet = TypeAliasType("ListOrSet", list[T] | set[T], type_params=(T,)) + + The name ListOrSet can then be used as an alias for the type it refers to. + + The type_params argument should contain all the type parameters used + in the value of the type alias. If the alias is not generic, this + argument is omitted. + + Static type checkers should only support type aliases declared using + TypeAliasType that follow these rules: + + - The first argument (the name) must be a string literal. + - The TypeAliasType instance must be immediately assigned to a variable + of the same name. (For example, 'X = TypeAliasType("Y", int)' is invalid, + as is 'X, Y = TypeAliasType("X", int), TypeAliasType("Y", int)'). 
+ + """ + + def __init__(self, name: str, value, *, type_params=()): + if not isinstance(name, str): + raise TypeError("TypeAliasType name must be a string") + self.__value__ = value + self.__type_params__ = type_params + + parameters = [] + for type_param in type_params: + if isinstance(type_param, TypeVarTuple): + parameters.extend(type_param) + else: + parameters.append(type_param) + self.__parameters__ = tuple(parameters) + def_mod = _caller() + if def_mod != 'typing_extensions': + self.__module__ = def_mod + # Setting this attribute closes the TypeAliasType from further modification + self.__name__ = name + + def __setattr__(self, name: str, value: object, /) -> None: + if hasattr(self, "__name__"): + self._raise_attribute_error(name) + super().__setattr__(name, value) + + def __delattr__(self, name: str, /) -> Never: + self._raise_attribute_error(name) + + def _raise_attribute_error(self, name: str) -> Never: + # Match the Python 3.12 error messages exactly + if name == "__name__": + raise AttributeError("readonly attribute") + elif name in {"__value__", "__type_params__", "__parameters__", "__module__"}: + raise AttributeError( + f"attribute '{name}' of 'typing.TypeAliasType' objects " + "is not writable" + ) + else: + raise AttributeError( + f"'typing.TypeAliasType' object has no attribute '{name}'" + ) + + def __repr__(self) -> str: + return self.__name__ + + def __getitem__(self, parameters): + if not isinstance(parameters, tuple): + parameters = (parameters,) + parameters = [ + typing._type_check( + item, f'Subscripting {self.__name__} requires a type.' + ) + for item in parameters + ] + return typing._GenericAlias(self, tuple(parameters)) + + def __reduce__(self): + return self.__name__ + + def __init_subclass__(cls, *args, **kwargs): + raise TypeError( + "type 'typing_extensions.TypeAliasType' is not an acceptable base type" + ) + + # The presence of this method convinces typing._type_check + # that TypeAliasTypes are types. 
+ def __call__(self): + raise TypeError("Type alias is not callable") + + if sys.version_info >= (3, 10): + def __or__(self, right): + # For forward compatibility with 3.12, reject Unions + # that are not accepted by the built-in Union. + if not _is_unionable(right): + return NotImplemented + return typing.Union[self, right] + + def __ror__(self, left): + if not _is_unionable(left): + return NotImplemented + return typing.Union[left, self] + + +if hasattr(typing, "is_protocol"): + is_protocol = typing.is_protocol + get_protocol_members = typing.get_protocol_members +else: + def is_protocol(tp: type, /) -> bool: + """Return True if the given type is a Protocol. + + Example:: + + >>> from typing_extensions import Protocol, is_protocol + >>> class P(Protocol): + ... def a(self) -> str: ... + ... b: int + >>> is_protocol(P) + True + >>> is_protocol(int) + False + """ + return ( + isinstance(tp, type) + and getattr(tp, '_is_protocol', False) + and tp is not Protocol + and tp is not typing.Protocol + ) + + def get_protocol_members(tp: type, /) -> typing.FrozenSet[str]: + """Return the set of members defined in a Protocol. + + Example:: + + >>> from typing_extensions import Protocol, get_protocol_members + >>> class P(Protocol): + ... def a(self) -> str: ... + ... b: int + >>> get_protocol_members(P) + frozenset({'a', 'b'}) + + Raise a TypeError for arguments that are not Protocols. + """ + if not is_protocol(tp): + raise TypeError(f'{tp!r} is not a Protocol') + if hasattr(tp, '__protocol_attrs__'): + return frozenset(tp.__protocol_attrs__) + return frozenset(_get_protocol_attrs(tp)) + + +if hasattr(typing, "Doc"): + Doc = typing.Doc +else: + class Doc: + """Define the documentation of a type annotation using ``Annotated``, to be + used in class attributes, function and method parameters, return values, + and variables. + + The value should be a positional-only string literal to allow static tools + like editors and documentation generators to use it. 
+ + This complements docstrings. + + The string value passed is available in the attribute ``documentation``. + + Example:: + + >>> from typing_extensions import Annotated, Doc + >>> def hi(to: Annotated[str, Doc("Who to say hi to")]) -> None: ... + """ + def __init__(self, documentation: str, /) -> None: + self.documentation = documentation + + def __repr__(self) -> str: + return f"Doc({self.documentation!r})" + + def __hash__(self) -> int: + return hash(self.documentation) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Doc): + return NotImplemented + return self.documentation == other.documentation + + +# Aliases for items that have always been in typing. +# Explicitly assign these (rather than using `from typing import *` at the top), +# so that we get a CI error if one of these is deleted from typing.py +# in a future version of Python +AbstractSet = typing.AbstractSet +AnyStr = typing.AnyStr +BinaryIO = typing.BinaryIO +Callable = typing.Callable +Collection = typing.Collection +Container = typing.Container +Dict = typing.Dict +ForwardRef = typing.ForwardRef +FrozenSet = typing.FrozenSet +Generator = typing.Generator +Generic = typing.Generic +Hashable = typing.Hashable +IO = typing.IO +ItemsView = typing.ItemsView +Iterable = typing.Iterable +Iterator = typing.Iterator +KeysView = typing.KeysView +List = typing.List +Mapping = typing.Mapping +MappingView = typing.MappingView +Match = typing.Match +MutableMapping = typing.MutableMapping +MutableSequence = typing.MutableSequence +MutableSet = typing.MutableSet +Optional = typing.Optional +Pattern = typing.Pattern +Reversible = typing.Reversible +Sequence = typing.Sequence +Set = typing.Set +Sized = typing.Sized +TextIO = typing.TextIO +Tuple = typing.Tuple +Union = typing.Union +ValuesView = typing.ValuesView +cast = typing.cast +no_type_check = typing.no_type_check +no_type_check_decorator = typing.no_type_check_decorator diff --git a/llmeval-env/pyvenv.cfg b/llmeval-env/pyvenv.cfg new 
file mode 100644 index 0000000000000000000000000000000000000000..0537ffc00ba0979f914c62c057a72f4bf1f681c4 --- /dev/null +++ b/llmeval-env/pyvenv.cfg @@ -0,0 +1,3 @@ +home = /usr/bin +include-system-site-packages = false +version = 3.10.12 diff --git a/lm-evaluation/build/lib/lm_eval/decontamination/__init__.py b/lm-evaluation/build/lib/lm_eval/decontamination/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lm-evaluation/build/lib/lm_eval/decontamination/archiver.py b/lm-evaluation/build/lib/lm_eval/decontamination/archiver.py new file mode 100644 index 0000000000000000000000000000000000000000..fa8a715f78e4cccef9f930e5cf448c4481730c2d --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/decontamination/archiver.py @@ -0,0 +1,171 @@ +import datetime +import io +import json +import mmap +import os +from pathlib import Path +from typing import Any + +import jsonlines +import tqdm +import zstandard + + +def json_serial(obj: Any) -> str: + """JSON serializer for objects not serializable by default json code""" + + if isinstance(obj, (datetime.datetime,)): + return obj.isoformat() + raise TypeError("Type %s not serializable" % type(obj)) + + +# Modified version of lm_dataformat Archive for single file. 
+class Archive: + def __init__(self, file_path: str, compression_level: int = 3) -> None: + self.file_path = file_path + dir_name = os.path.dirname(file_path) + if dir_name: + os.makedirs(dir_name, exist_ok=True) + self.fh = open(self.file_path, "wb") + self.cctx = zstandard.ZstdCompressor(level=compression_level) + self.compressor = self.cctx.stream_writer(self.fh) + + def add_data(self, data, meta=None) -> None: + if meta is None: + meta = {} + self.compressor.write( + json.dumps({"text": data, "meta": meta}, default=json_serial).encode( + "UTF-8" + ) + + b"\n" + ) + + def commit(self) -> None: + self.compressor.flush(zstandard.FLUSH_FRAME) + self.fh.flush() + self.fh.close() + + +# Modified version of lm_dataformat Reader with self.fh set, allowing peeking for tqdm. +class Reader: + def __init__(self) -> None: + pass + + def read( + self, + file, + get_meta: bool = False, + autojoin_paragraphs: bool = True, + para_joiner: str = "\n\n", + ): + with open(file, "rb") as fh: + self.fh = fh + cctx = zstandard.ZstdDecompressor() + reader = io.BufferedReader(cctx.stream_reader(fh)) + rdr = jsonlines.Reader(reader) + for ob in rdr: + # naive jsonl where each object is just the string itself, with no meta. For legacy compatibility. 
+ if isinstance(ob, str): + assert not get_meta + yield ob + continue + + text = ob["text"] + + if autojoin_paragraphs and isinstance(text, list): + text = para_joiner.join(text) + + if get_meta: + yield text, (ob["meta"] if "meta" in ob else {}) + else: + yield text + + +class TextArchive: + def __init__(self, file_path, mode: str = "rb+") -> None: + self.file_path = file_path + dir_name = os.path.dirname(file_path) + if dir_name: + os.makedirs(dir_name, exist_ok=True) + + if not os.path.exists(file_path): + Path(file_path).touch() + + self.fh = open(self.file_path, mode) + + def add_data(self, data) -> None: + self.fh.write(data.encode("UTF-8") + b"\n") + + def commit(self) -> None: + self.fh.flush() + self.fh.close() + + +class TextReader: + def __init__(self, file_path) -> None: + self.file_path = file_path + + # Optimized mmap read with infrequent tqdm updates to maintain speed + # Tested up to 250MB/s. + def read_tqdm(self, update_frequency: int = 10000): + current_file_position = 0 + line_counter = 0 + with open(self.file_path, "r", encoding="utf-8") as fh, tqdm.tqdm( + total=os.path.getsize(self.file_path), + dynamic_ncols=True, + unit="byte", + unit_scale=1, + ) as progress: + with mmap.mmap(fh.fileno(), length=0, access=mmap.ACCESS_READ) as mmap_obj: + for line in iter(mmap_obj.readline, b""): + line = line.decode("utf-8") + line_counter += 1 + if line_counter == update_frequency: + new_file_pos = mmap_obj.tell() + bytes_read = new_file_pos - current_file_position + current_file_position = new_file_pos + progress.update(bytes_read) + line_counter = 0 + yield line[:-1] + + def read_and_tell(self): + current_file_position = 0 + with open(self.file_path, "r", encoding="utf8") as fh: + with mmap.mmap(fh.fileno(), length=0, access=mmap.ACCESS_READ) as mmap_obj: + for line in iter(mmap_obj.readline, b""): + line = line.decode("utf-8") + new_file_pos = mmap_obj.tell() + raw_bytes_read = new_file_pos - current_file_position + current_file_position = new_file_pos 
+ yield line[:-1], raw_bytes_read + + def read(self): + with open(self.file_path, "r", encoding="utf8") as fh: + with mmap.mmap(fh.fileno(), length=0, access=mmap.ACCESS_READ) as mmap_obj: + for line in iter(mmap_obj.readline, b""): + line = line.decode("utf-8") + yield line[:-1] + + def read_slow(self): + with open(self.file_path, "r", encoding="utf8") as fh: + while True: + line = fh.readline() + if line == -1 or line == "": + break + else: + yield line[:-1] + + +# Optimized for speed. Decompresses the archive in shell before +# using the mmap'd TextReader. +class ZStdTextReader: + def __init__(self, file) -> None: + self.file = file + + def read_tqdm(self): + decompressed_file = self.file[:-4] + print("Decompressing file, please wait...") + os.system(f"zstd -d {self.file}") # linux decompress is faster + reader = TextReader(decompressed_file) + yield from reader.read_tqdm() + os.remove(decompressed_file) diff --git a/lm-evaluation/lm_eval.egg-info/PKG-INFO b/lm-evaluation/lm_eval.egg-info/PKG-INFO new file mode 100644 index 0000000000000000000000000000000000000000..517c7fb5fd75d4e4398157ae042e6cce87ec315b --- /dev/null +++ b/lm-evaluation/lm_eval.egg-info/PKG-INFO @@ -0,0 +1,542 @@ +Metadata-Version: 2.1 +Name: lm_eval +Version: 0.4.2 +Summary: A framework for evaluating language models +Author-email: EleutherAI +License: MIT +Project-URL: Homepage, https://github.com/EleutherAI/lm-evaluation-harness +Project-URL: Repository, https://github.com/EleutherAI/lm-evaluation-harness +Classifier: Development Status :: 3 - Alpha +Classifier: Programming Language :: Python :: 3 +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Requires-Python: >=3.8 +Description-Content-Type: text/markdown +License-File: LICENSE.md +Requires-Dist: accelerate>=0.21.0 +Requires-Dist: evaluate +Requires-Dist: datasets>=2.16.0 +Requires-Dist: evaluate>=0.4.0 +Requires-Dist: jsonlines +Requires-Dist: numexpr +Requires-Dist: peft>=0.2.0 
+Requires-Dist: pybind11>=2.6.2 +Requires-Dist: pytablewriter +Requires-Dist: rouge-score>=0.0.4 +Requires-Dist: sacrebleu>=1.5.0 +Requires-Dist: scikit-learn>=0.24.1 +Requires-Dist: sqlitedict +Requires-Dist: torch>=1.8 +Requires-Dist: tqdm-multiprocess +Requires-Dist: transformers>=4.1 +Requires-Dist: zstandard +Requires-Dist: dill +Requires-Dist: word2number +Requires-Dist: more_itertools +Provides-Extra: anthropic +Requires-Dist: anthropic; extra == "anthropic" +Provides-Extra: dev +Requires-Dist: pytest; extra == "dev" +Requires-Dist: pytest-cov; extra == "dev" +Requires-Dist: pytest-xdist; extra == "dev" +Requires-Dist: pre-commit; extra == "dev" +Requires-Dist: mypy; extra == "dev" +Provides-Extra: gptq +Requires-Dist: auto-gptq[triton]>=0.6.0; extra == "gptq" +Provides-Extra: hf-transfer +Requires-Dist: hf_transfer; extra == "hf-transfer" +Provides-Extra: ifeval +Requires-Dist: langdetect; extra == "ifeval" +Requires-Dist: immutabledict; extra == "ifeval" +Provides-Extra: neuronx +Requires-Dist: optimum[neuronx]; extra == "neuronx" +Provides-Extra: mamba +Requires-Dist: mamba_ssm; extra == "mamba" +Requires-Dist: causal-conv1d==1.0.2; extra == "mamba" +Provides-Extra: math +Requires-Dist: sympy>=1.12; extra == "math" +Requires-Dist: antlr4-python3-runtime==4.11; extra == "math" +Provides-Extra: multilingual +Requires-Dist: nagisa>=0.2.7; extra == "multilingual" +Requires-Dist: jieba>=0.42.1; extra == "multilingual" +Requires-Dist: pycountry; extra == "multilingual" +Provides-Extra: openai +Requires-Dist: openai==1.3.9; extra == "openai" +Requires-Dist: tiktoken; extra == "openai" +Provides-Extra: optimum +Requires-Dist: optimum[openvino]; extra == "optimum" +Provides-Extra: promptsource +Requires-Dist: promptsource>=0.2.3; extra == "promptsource" +Provides-Extra: sentencepiece +Requires-Dist: sentencepiece>=0.1.98; extra == "sentencepiece" +Requires-Dist: protobuf>=4.22.1; extra == "sentencepiece" +Provides-Extra: testing +Requires-Dist: pytest; extra == 
"testing" +Requires-Dist: pytest-cov; extra == "testing" +Requires-Dist: pytest-xdist; extra == "testing" +Provides-Extra: vllm +Requires-Dist: vllm==0.3.2; extra == "vllm" +Provides-Extra: zeno +Requires-Dist: pandas; extra == "zeno" +Requires-Dist: zeno-client; extra == "zeno" +Provides-Extra: wandb +Requires-Dist: wandb>=0.16.3; extra == "wandb" +Requires-Dist: pandas; extra == "wandb" +Requires-Dist: numpy; extra == "wandb" +Provides-Extra: all +Requires-Dist: lm_eval[anthropic]; extra == "all" +Requires-Dist: lm_eval[dev]; extra == "all" +Requires-Dist: lm_eval[gptq]; extra == "all" +Requires-Dist: lm_eval[hf_transfer]; extra == "all" +Requires-Dist: lm_eval[ifeval]; extra == "all" +Requires-Dist: lm_eval[mamba]; extra == "all" +Requires-Dist: lm_eval[math]; extra == "all" +Requires-Dist: lm_eval[multilingual]; extra == "all" +Requires-Dist: lm_eval[openai]; extra == "all" +Requires-Dist: lm_eval[promptsource]; extra == "all" +Requires-Dist: lm_eval[sentencepiece]; extra == "all" +Requires-Dist: lm_eval[testing]; extra == "all" +Requires-Dist: lm_eval[vllm]; extra == "all" +Requires-Dist: lm_eval[zeno]; extra == "all" +Requires-Dist: lm_eval[wandb]; extra == "all" + +# Language Model Evaluation Harness + +[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.10256836.svg)](https://doi.org/10.5281/zenodo.10256836) + +## Announcement +**A new v0.4.0 release of lm-evaluation-harness is available** ! 
+ +New updates and features include: + +- Internal refactoring +- Config-based task creation and configuration +- Easier import and sharing of externally-defined task config YAMLs +- Support for Jinja2 prompt design, easy modification of prompts + prompt imports from Promptsource +- More advanced configuration options, including output post-processing, answer extraction, and multiple LM generations per document, configurable fewshot settings, and more +- Speedups and new modeling libraries supported, including: faster data-parallel HF model usage, vLLM support, MPS support with HuggingFace, and more +- Logging and usability changes +- New tasks including CoT BIG-Bench-Hard, Belebele, user-defined task groupings, and more + +Please see our updated documentation pages in `docs/` for more details. + +Development will be continuing on the `main` branch, and we encourage you to give us feedback on what features are desired and how to improve the library further, or ask questions, either in issues or PRs on GitHub, or in the [EleutherAI discord](https://discord.gg/eleutherai)! + +## Overview + +This project provides a unified framework to test generative language models on a large number of different evaluation tasks. + +**Features:** +- Over 60 standard academic benchmarks for LLMs, with hundreds of subtasks and variants implemented. +- Support for models loaded via [transformers](https://github.com/huggingface/transformers/) (including quantization via [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ)), [GPT-NeoX](https://github.com/EleutherAI/gpt-neox), and [Megatron-DeepSpeed](https://github.com/microsoft/Megatron-DeepSpeed/), with a flexible tokenization-agnostic interface. +- Support for fast and memory-efficient inference with [vLLM](https://github.com/vllm-project/vllm). +- Support for commercial APIs including [OpenAI](https://openai.com), and [TextSynth](https://textsynth.com/). +- Support for evaluation on adapters (e.g. 
LoRA) supported in [HuggingFace's PEFT library](https://github.com/huggingface/peft). +- Support for local models and benchmarks. +- Evaluation with publicly available prompts ensures reproducibility and comparability between papers. +- Easy support for custom prompts and evaluation metrics. + +The Language Model Evaluation Harness is the backend for 🤗 Hugging Face's popular [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard), has been used in [hundreds of papers](https://scholar.google.com/scholar?oi=bibs&hl=en&authuser=2&cites=15052937328817631261,4097184744846514103,1520777361382155671,17476825572045927382,18443729326628441434,14801318227356878622,7890865700763267262,12854182577605049984,15641002901115500560,5104500764547628290), and is used internally by dozens of organizations including NVIDIA, Cohere, BigScience, BigCode, Nous Research, and Mosaic ML. + +## Install + +To install the `lm-eval` package from the github repository, run: + +```bash +git clone https://github.com/EleutherAI/lm-evaluation-harness +cd lm-evaluation-harness +pip install -e . +``` + +We also provide a number of optional dependencies for extended functionality. A detailed table is available at the end of this document. + +## Basic Usage + +### Hugging Face `transformers` + +To evaluate a model hosted on the [HuggingFace Hub](https://huggingface.co/models) (e.g. GPT-J-6B) on `hellaswag` you can use the following command (this assumes you are using a CUDA-compatible GPU): + +```bash +lm_eval --model hf \ + --model_args pretrained=EleutherAI/gpt-j-6B \ + --tasks hellaswag \ + --device cuda:0 \ + --batch_size 8 +``` + +Additional arguments can be provided to the model constructor using the `--model_args` flag. 
Most notably, this supports the common practice of using the `revisions` feature on the Hub to store partially trained checkpoints, or to specify the datatype for running a model: + +```bash +lm_eval --model hf \ + --model_args pretrained=EleutherAI/pythia-160m,revision=step100000,dtype="float" \ + --tasks lambada_openai,hellaswag \ + --device cuda:0 \ + --batch_size 8 +``` + +Models that are loaded via both `transformers.AutoModelForCausalLM` (autoregressive, decoder-only GPT style models) and `transformers.AutoModelForSeq2SeqLM` (such as encoder-decoder models like T5) in Huggingface are supported. + +Batch size selection can be automated by setting the ```--batch_size``` flag to ```auto```. This will perform automatic detection of the largest batch size that will fit on your device. On tasks where there is a large difference between the longest and shortest example, it can be helpful to periodically recompute the largest batch size, to gain a further speedup. To do this, append ```:N``` to above flag to automatically recompute the largest batch size ```N``` times. For example, to recompute the batch size 4 times, the command would be: + +```bash +lm_eval --model hf \ + --model_args pretrained=EleutherAI/pythia-160m,revision=step100000,dtype="float" \ + --tasks lambada_openai,hellaswag \ + --device cuda:0 \ + --batch_size auto:4 +``` + +The full list of supported arguments are provided [here](./docs/interface.md), and on the terminal by calling `lm_eval -h`. Alternatively, you can use `lm-eval` instead of `lm_eval`. + +> [!Note] +> Just like you can provide a local path to `transformers.AutoModel`, you can also provide a local path to `lm_eval` via `--model_args pretrained=/path/to/model` + +#### Multi-GPU Evaluation with Hugging Face `accelerate` + +We support two main ways of using Hugging Face's [accelerate 🚀](https://github.com/huggingface/accelerate) library for multi-GPU evaluation. 
+ +To perform *data-parallel evaluation* (where each GPU loads a **separate full copy** of the model), we leverage the `accelerate` launcher as follows: + +``` +accelerate launch -m lm_eval --model hf \ + --tasks lambada_openai,arc_easy \ + --batch_size 16 +``` +(or via `accelerate launch --no-python lm_eval`). + +For cases where your model can fit on a single GPU, this allows you to evaluate on K GPUs K times faster than on one. + +**WARNING**: This setup does not work with FSDP model sharding, so in `accelerate config` FSDP must be disabled, or the NO_SHARD FSDP option must be used. + +The second way of using `accelerate` for multi-GPU evaluation is when your model is *too large to fit on a single GPU.* + +In this setting, run the library *outside of the `accelerate` launcher*, but passing `parallelize=True` to `--model_args` as follows: + +``` +lm_eval --model hf \ + --tasks lambada_openai,arc_easy \ + --model_args parallelize=True \ + --batch_size 16 +``` + +This means that your model's weights will be split across all available GPUs. + +For more advanced users or even larger models, we allow for the following arguments when `parallelize=True` as well: +- `device_map_option`: How to split model weights across available GPUs. defaults to "auto". +- `max_memory_per_gpu`: the max GPU memory to use per GPU in loading the model. +- `max_cpu_memory`: the max amount of CPU memory to use when offloading the model weights to RAM. +- `offload_folder`: a folder where model weights will be offloaded to disk if needed. + +These two options (`accelerate launch` and `parallelize=True`) are mutually exclusive. 
+ +**Note: we do not currently support multi-node evaluations natively, and advise using either an externally hosted server to run inference requests against, or creating a custom integration with your distributed framework [as is done for the GPT-NeoX library](https://github.com/EleutherAI/gpt-neox/blob/main/eval_tasks/eval_adapter.py).** + +### NVIDIA `nemo` models + +[NVIDIA NeMo Framework](https://github.com/NVIDIA/NeMo) is a generative AI framework built for researchers and pytorch developers working on language models. + +To evaluate a `nemo` model, start by installing NeMo following [the documentation](https://github.com/NVIDIA/NeMo?tab=readme-ov-file#installation). We highly recommended to use the NVIDIA PyTorch or NeMo container, especially if having issues installing Apex or any other dependencies (see [latest released containers](https://github.com/NVIDIA/NeMo/releases)). Please also install the lm evaluation harness library following the instructions in [the Install section](https://github.com/EleutherAI/lm-evaluation-harness/tree/main?tab=readme-ov-file#install). + +NeMo models can be obtained through [NVIDIA NGC Catalog](https://catalog.ngc.nvidia.com/models) or in [NVIDIA's Hugging Face page](https://huggingface.co/nvidia). In [NVIDIA NeMo Framework](https://github.com/NVIDIA/NeMo/tree/main/scripts/nlp_language_modeling) there are conversion scripts to convert the `hf` checkpoints of popular models like llama, falcon, mixtral or mpt to `nemo`. + +Run a `nemo` model on one GPU: +```bash +lm_eval --model nemo_lm \ + --model_args path= \ + --tasks hellaswag \ + --batch_size 32 +``` + +It is recommended to unpack the `nemo` model to avoid the unpacking inside the docker container - it may overflow disk space. For that you can run: + +``` +mkdir MY_MODEL +tar -xvf MY_MODEL.nemo -c MY_MODEL +``` + +#### Multi-GPU evaluation with NVIDIA `nemo` models + +By default, only one GPU is used. 
But we do support either data replication or tensor/pipeline parallelism during evaluation, on one node. + +1) To enable data replication, set the `model_args` of `devices` to the number of data replicas to run. For example, the command to run 8 data replicas over 8 GPUs is: +```bash +torchrun --nproc-per-node=8 --no-python lm_eval \ + --model nemo_lm \ + --model_args path=,devices=8 \ + --tasks hellaswag \ + --batch_size 32 +``` + +2) To enable tensor and/or pipeline parallelism, set the `model_args` of `tensor_model_parallel_size` and/or `pipeline_model_parallel_size`. In addition, you also have to set up `devices` to be equal to the product of `tensor_model_parallel_size` and/or `pipeline_model_parallel_size`. For example, the command to use one node of 4 GPUs with tensor parallelism of 2 and pipeline parallelism of 2 is: +```bash +torchrun --nproc-per-node=4 --no-python lm_eval \ + --model nemo_lm \ + --model_args path=,devices=4,tensor_model_parallel_size=2,pipeline_model_parallel_size=2 \ + --tasks hellaswag \ + --batch_size 32 +``` +Note that it is recommended to substitute the `python` command by `torchrun --nproc-per-node= --no-python` to facilitate loading the model into the GPUs. This is especially important for large checkpoints loaded into multiple GPUs. + +Not supported yet: multi-node evaluation and combinations of data replication with tensor or pipeline parallelism. + +### Tensor + Data Parallel and Optimized Inference with `vLLM` + +We also support vLLM for faster inference on [supported model types](https://docs.vllm.ai/en/latest/models/supported_models.html), especially faster when splitting a model across multiple GPUs. 
For single-GPU or multi-GPU — tensor parallel, data parallel, or a combination of both — inference, for example: + +```bash +lm_eval --model vllm \ + --model_args pretrained={model_name},tensor_parallel_size={GPUs_per_model},dtype=auto,gpu_memory_utilization=0.8,data_parallel_size={model_replicas} \ + --tasks lambada_openai \ + --batch_size auto +``` +To use vllm, do `pip install lm_eval[vllm]`. For a full list of supported vLLM configurations, please reference our [vLLM integration](https://github.com/EleutherAI/lm-evaluation-harness/blob/e74ec966556253fbe3d8ecba9de675c77c075bce/lm_eval/models/vllm_causallms.py) and the vLLM documentation. + +vLLM occasionally differs in output from Huggingface. We treat Huggingface as the reference implementation, and provide a [script](./scripts/model_comparator.py) for checking the validity of vllm results against HF. + +> [!Tip] +> For fastest performance, we recommend using `--batch_size auto` for vLLM whenever possible, to leverage its continuous batching functionality! + +> [!Tip] +> Passing `max_model_len=4096` or some other reasonable default to vLLM through model args may cause speedups or prevent out-of-memory errors when trying to use auto batch size, such as for Mistral-7B-v0.1 which defaults to a maximum length of 32k. + +### Model APIs and Inference Servers + +Our library also supports the evaluation of models served via several commercial APIs, and we hope to implement support for the most commonly used performant local/self-hosted inference servers. + +To call a hosted model, use: + +```bash +export OPENAI_API_KEY=YOUR_KEY_HERE +lm_eval --model openai-completions \ + --model_args model=davinci \ + --tasks lambada_openai,hellaswag +``` + +We also support using your own local inference server with servers that mirror the OpenAI Completions and ChatCompletions APIs. 
+ +```bash +lm_eval --model local-chat-completions --tasks gsm8k --model_args model=facebook/opt-125m,base_url=http://{yourip}:8000/v1 +``` +Note that for externally hosted models, configs such as `--device` and `--batch_size` should not be used and do not function. Just like you can use `--model_args` to pass arbitrary arguments to the model constructor for local models, you can use it to pass arbitrary arguments to the model API for hosted models. See the documentation of the hosting service for information on what arguments they support. + +| API or Inference Server | Implemented? | `--model ` name | Models supported: | Request Types: | +|---------------------------------------------------------------------------------------------------------------------------|---------------------------------|---------------------------------------------------------------------|-----------------------------------------------------------------------------------------------|------------------------------------------------------------| +| OpenAI Completions | :heavy_check_mark: | `openai-completions`, `local-completions` | All OpenAI Completions API models | `generate_until`, `loglikelihood`, `loglikelihood_rolling` | +| OpenAI ChatCompletions | :heavy_check_mark: | `openai-chat-completions`, `local-chat-completions` | [All ChatCompletions API models](https://platform.openai.com/docs/guides/gpt) | `generate_until` (no logprobs) | +| Anthropic | :heavy_check_mark: | `anthropic` | [Supported Anthropic Engines](https://docs.anthropic.com/claude/reference/selecting-a-model) | `generate_until` (no logprobs) | +| Anthropic Chat | :heavy_check_mark: | `anthropic-chat`, `anthropic-chat-completions` | [Supported Anthropic Engines](https://docs.anthropic.com/claude/docs/models-overview) | `generate_until` (no logprobs) | +| Textsynth | :heavy_check_mark: | `textsynth` | [All supported engines](https://textsynth.com/documentation.html#engines) | `generate_until`, `loglikelihood`, 
`loglikelihood_rolling` | +| Cohere | [:hourglass: - blocked on Cohere API bug](https://github.com/EleutherAI/lm-evaluation-harness/pull/395) | N/A | [All `cohere.generate()` engines](https://docs.cohere.com/docs/models) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` | +| [Llama.cpp](https://github.com/ggerganov/llama.cpp) (via [llama-cpp-python](https://github.com/abetlen/llama-cpp-python)) | :heavy_check_mark: | `gguf`, `ggml` | [All models supported by llama.cpp](https://github.com/ggerganov/llama.cpp) | `generate_until`, `loglikelihood`, (perplexity evaluation not yet implemented) | +| vLLM | :heavy_check_mark: | `vllm` | [Most HF Causal Language Models](https://docs.vllm.ai/en/latest/models/supported_models.html) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` | +| Mamba | :heavy_check_mark: | `mamba_ssm` | [Mamba architecture Language Models via the `mamba_ssm` package](https://huggingface.co/state-spaces) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` | +| Huggingface Optimum (Causal LMs) | ✔️ | `openvino` | Any decoder-only AutoModelForCausalLM converted with Huggingface Optimum into OpenVINO™ Intermediate Representation (IR) format | `generate_until`, `loglikelihood`, `loglikelihood_rolling` | ... | +| Neuron via AWS Inf2 (Causal LMs) | ✔️ | `neuronx` | Any decoder-only AutoModelForCausalLM supported to run on [huggingface-ami image for inferentia2](https://aws.amazon.com/marketplace/pp/prodview-gr3e6yiscria2) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` | ... | +| Your local inference server! | :heavy_check_mark: | `local-completions` or `local-chat-completions` (using `openai-chat-completions` model type) | Any server address that accepts GET requests using HF models and mirror's OpenAI's Completions or ChatCompletions interface | `generate_until` | | ... 
| + +Models which do not supply logits or logprobs can be used with tasks of type `generate_until` only, while local models, or APIs that supply logprobs/logits of their prompts, can be run on all task types: `generate_until`, `loglikelihood`, `loglikelihood_rolling`, and `multiple_choice`. + +For more information on the different task `output_types` and model request types, see [our documentation](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/model_guide.md#interface). + +> [!Note] +> For best performance with closed chat model APIs such as Anthropic Claude 3 and GPT-4, we recommend carefully looking at a few sample outputs using `--limit 10` first to confirm answer extraction and scoring on generative tasks is performing as expected. Providing `system=""` within `--model_args` for anthropic-chat-completions, to instruct the model what format to respond in, may be useful. + + +### Other Frameworks + +A number of other libraries contain scripts for calling the eval harness through their library. These include [GPT-NeoX](https://github.com/EleutherAI/gpt-neox/blob/main/eval_tasks/eval_adapter.py), [Megatron-DeepSpeed](https://github.com/microsoft/Megatron-DeepSpeed/blob/main/examples/MoE/readme_evalharness.md), and [mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/blob/master/eval_harness.py). + +To create your own custom integration you can follow instructions from [this tutorial](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/interface.md#external-library-usage). + +### Additional Features +> [!Note] +> For tasks unsuitable for direct evaluation — either due to risks associated with executing untrusted code or complexities in the evaluation process — the `--predict_only` flag is available to obtain decoded generations for post-hoc evaluation. 
+ +If you have a Metal compatible Mac, you can run the eval harness using the MPS back-end by replacing `--device cuda:0` with `--device mps` (requires PyTorch version 2.1 or higher). + +> [!Note] +> You can inspect what the LM inputs look like by running the following command: +> ```bash +> python write_out.py \ +> --tasks \ +> --num_fewshot 5 \ +> --num_examples 10 \ +> --output_base_path /path/to/output/folder +> ``` +> This will write out one text file for each task. + +To verify the data integrity of the tasks you're performing in addition to running the tasks themselves, you can use the `--check_integrity` flag: + +```bash +lm_eval --model openai \ + --model_args engine=davinci \ + --tasks lambada_openai,hellaswag \ + --check_integrity +``` + +## Advanced Usage Tips + +For models loaded with the HuggingFace `transformers` library, any arguments provided via `--model_args` get passed to the relevant constructor directly. This means that anything you can do with `AutoModel` can be done with our library. 
For example, you can pass a local path via `pretrained=` or use models finetuned with [PEFT](https://github.com/huggingface/peft) by taking the call you would run to evaluate the base model and add `,peft=PATH` to the `model_args` argument: +```bash +lm_eval --model hf \ + --model_args pretrained=EleutherAI/gpt-j-6b,parallelize=True,load_in_4bit=True,peft=nomic-ai/gpt4all-j-lora \ + --tasks openbookqa,arc_easy,winogrande,hellaswag,arc_challenge,piqa,boolq \ + --device cuda:0 +``` + +[GPTQ](https://github.com/PanQiWei/AutoGPTQ) quantized models can be loaded by specifying their file names in `,autogptq=NAME` (or `,autogptq=True` for default names) in the `model_args` argument: + +```bash +lm_eval --model hf \ + --model_args pretrained=model-name-or-path,autogptq=model.safetensors,gptq_use_triton=True \ + --tasks hellaswag +``` + +We support wildcards in task names, for example you can run all of the machine-translated lambada tasks via `--task lambada_openai_mt_*`. + +To save evaluation results provide an `--output_path`. We also support logging model responses with the `--log_samples` flag for post-hoc analysis. + +Additionally, one can provide a directory with `--use_cache` to cache the results of prior runs. This allows you to avoid repeated execution of the same (model, task) pairs for re-scoring. + +For a full list of supported arguments, check out the [interface](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/interface.md) guide in our documentation! + +## Visualizing Results + +You can seamlessly visualize and analyze the results of your evaluation harness runs using both Weights & Biases (W&B) and Zeno. + +### Zeno + +You can use [Zeno](https://zenoml.com) to visualize the results of your eval harness runs. + +First, head to [hub.zenoml.com](https://hub.zenoml.com) to create an account and get an API key [on your account page](https://hub.zenoml.com/account). 
+Add this key as an environment variable: + +```bash +export ZENO_API_KEY=[your api key] +``` + +You'll also need to install the `lm_eval[zeno]` package extra. + +To visualize the results, run the eval harness with the `log_samples` and `output_path` flags. +We expect `output_path` to contain multiple folders that represent individual model names. +You can thus run your evaluation on any number of tasks and models and upload all of the results as projects on Zeno. + +```bash +lm_eval \ + --model hf \ + --model_args pretrained=EleutherAI/gpt-j-6B \ + --tasks hellaswag \ + --device cuda:0 \ + --batch_size 8 \ + --log_samples \ + --output_path output/gpt-j-6B +``` + +Then, you can upload the resulting data using the `zeno_visualize` script: + +```bash +python scripts/zeno_visualize.py \ + --data_path output \ + --project_name "Eleuther Project" +``` + +This will use all subfolders in `data_path` as different models and upload all tasks within these model folders to Zeno. +If you run the eval harness on multiple tasks, the `project_name` will be used as a prefix and one project will be created per task. + +You can find an example of this workflow in [examples/visualize-zeno.ipynb](examples/visualize-zeno.ipynb). + +### Weights and Biases + +With the [Weights and Biases](https://wandb.ai/site) integration, you can now spend more time extracting deeper insights into your evaluation results. The integration is designed to streamline the process of logging and visualizing experiment results using the Weights & Biases (W&B) platform. 
+ +The integration provides functionalities + +- to automatically log the evaluation results, +- log the samples as W&B Tables for easy visualization, +- log the `results.json` file as an artifact for version control, +- log the `_eval_samples.json` file if the samples are logged, +- generate a comprehensive report for analysis and visualization with all the important metrics, +- log task and cli specific configs, +- and more out of the box like the command used to run the evaluation, GPU/CPU counts, timestamp, etc. + +First you'll need to install the lm_eval[wandb] package extra. Do `pip install lm_eval[wandb]`. + +Authenticate your machine with your unique W&B token. Visit https://wandb.ai/authorize to get one. Do `wandb login` in your command line terminal. + +Run eval harness as usual with a `wandb_args` flag. Use this flag to provide arguments for initializing a wandb run ([wandb.init](https://docs.wandb.ai/ref/python/init)) as comma separated string arguments. + +```bash +lm_eval \ + --model hf \ + --model_args pretrained=microsoft/phi-2,trust_remote_code=True \ + --tasks hellaswag,mmlu_abstract_algebra \ + --device cuda:0 \ + --batch_size 8 \ + --output_path output/phi-2 \ + --limit 10 \ + --wandb_args project=lm-eval-harness-integration \ + --log_samples +``` + +In the stdout, you will find the link to the W&B run page as well as link to the generated report. You can find an example of this workflow in [examples/visualize-wandb.ipynb](examples/visualize-wandb.ipynb), and an example of how to integrate it beyond the CLI. + +## How to Contribute or Learn More? + +For more information on the library and how everything fits together, check out all of our [documentation pages](https://github.com/EleutherAI/lm-evaluation-harness/tree/main/docs)! We plan to post a larger roadmap of desired + planned library improvements soon, with more information on how contributors can help. 
+ +### Implementing new tasks + +To implement a new task in the eval harness, see [this guide](./docs/new_task_guide.md). + +In general, we follow this priority list for addressing concerns about prompting and other eval details: +1. If there is widespread agreement among people who train LLMs, use the agreed upon procedure. +2. If there is a clear and unambiguous official implementation, use that procedure. +3. If there is widespread agreement among people who evaluate LLMs, use the agreed upon procedure. +4. If there are multiple common implementations but not universal or widespread agreement, use our preferred option among the common implementations. As before, prioritize choosing from among the implementations found in LLM training papers. + +These are guidelines and not rules, and can be overruled in special circumstances. + +We try to prioritize agreement with the procedures used by other groups to decrease the harm when people inevitably compare runs across different papers despite our discouragement of the practice. Historically, we also prioritized the implementation from [Language Models are Few Shot Learners](https://arxiv.org/abs/2005.14165) as our original goal was specifically to compare results with that paper. + +### Support + +The best way to get support is to open an issue on this repo or join the [EleutherAI Discord server](https://discord.gg/eleutherai). The `#lm-thunderdome` channel is dedicated to developing this project and the `#release-discussion` channel is for receiving support for our releases. If you've used the library and have had a positive (or negative) experience, we'd love to hear from you! 
+ +## Optional Extras +Extras dependencies can be installed via `pip install -e ".[NAME]"` + +| Name | Use | +|---------------|---------------------------------------| +| anthropic | For using Anthropic's models | +| dev | For linting PRs and contributions | +| gptq | For loading models with GPTQ | +| hf_transfer | For speeding up HF Hub file downloads | +| ifeval | For running the IFEval task | +| neuronx | For running on AWS inf2 instances | +| mamba | For loading Mamba SSM models | +| math | For running math task answer checking | +| multilingual | For multilingual tokenizers | +| openai | For using OpenAI's models | +| optimum | For running Intel OpenVINO models | +| promptsource | For using PromptSource prompts | +| sentencepiece | For using the sentencepiece tokenizer | +| testing | For running library test suite | +| vllm | For loading models with vLLM | +| zeno | For visualizing results with Zeno | +|---------------|---------------------------------------| +| all | Loads all extras (not recommended) | + +## Cite as + +``` +@misc{eval-harness, + author = {Gao, Leo and Tow, Jonathan and Abbasi, Baber and Biderman, Stella and Black, Sid and DiPofi, Anthony and Foster, Charles and Golding, Laurence and Hsu, Jeffrey and Le Noac'h, Alain and Li, Haonan and McDonell, Kyle and Muennighoff, Niklas and Ociepa, Chris and Phang, Jason and Reynolds, Laria and Schoelkopf, Hailey and Skowron, Aviya and Sutawika, Lintang and Tang, Eric and Thite, Anish and Wang, Ben and Wang, Kevin and Zou, Andy}, + title = {A framework for few-shot language model evaluation}, + month = 12, + year = 2023, + publisher = {Zenodo}, + version = {v0.4.0}, + doi = {10.5281/zenodo.10256836}, + url = {https://zenodo.org/records/10256836} +} +``` diff --git a/lm-evaluation/lm_eval.egg-info/SOURCES.txt b/lm-evaluation/lm_eval.egg-info/SOURCES.txt new file mode 100644 index 0000000000000000000000000000000000000000..138508c0bbe95c22a671d4c2165e56bfc027170f --- /dev/null +++ 
b/lm-evaluation/lm_eval.egg-info/SOURCES.txt @@ -0,0 +1,2492 @@ +LICENSE.md +README.md +pyproject.toml +setup.py +lm_eval/__init__.py +lm_eval/__main__.py +lm_eval/evaluator.py +lm_eval/evaluator_utils.py +lm_eval/logging_utils.py +lm_eval/utils.py +lm_eval.egg-info/PKG-INFO +lm_eval.egg-info/SOURCES.txt +lm_eval.egg-info/dependency_links.txt +lm_eval.egg-info/entry_points.txt +lm_eval.egg-info/requires.txt +lm_eval.egg-info/top_level.txt +lm_eval/api/__init__.py +lm_eval/api/filter.py +lm_eval/api/instance.py +lm_eval/api/metrics.py +lm_eval/api/model.py +lm_eval/api/registry.py +lm_eval/api/samplers.py +lm_eval/api/task.py +lm_eval/caching/cache.py +lm_eval/decontamination/__init__.py +lm_eval/decontamination/archiver.py +lm_eval/decontamination/decontaminate.py +lm_eval/decontamination/janitor.py +lm_eval/filters/__init__.py +lm_eval/filters/decontamination.py +lm_eval/filters/extraction.py +lm_eval/filters/selection.py +lm_eval/filters/transformation.py +lm_eval/models/__init__.py +lm_eval/models/anthropic_llms.py +lm_eval/models/dummy.py +lm_eval/models/gguf.py +lm_eval/models/huggingface.py +lm_eval/models/mamba_lm.py +lm_eval/models/nemo_lm.py +lm_eval/models/neuron_optimum.py +lm_eval/models/openai_completions.py +lm_eval/models/optimum_lm.py +lm_eval/models/textsynth.py +lm_eval/models/utils.py +lm_eval/models/vllm_causallms.py +lm_eval/prompts/__init__.py +lm_eval/tasks/__init__.py +lm_eval/tasks/__pycache__/__init__.cpython-310.pyc +lm_eval/tasks/aclue/README.md +lm_eval/tasks/aclue/_default_template_yaml +lm_eval/tasks/aclue/_generate_configs.py +lm_eval/tasks/aclue/aclue_ancient_chinese_culture.yaml +lm_eval/tasks/aclue/aclue_ancient_literature.yaml +lm_eval/tasks/aclue/aclue_ancient_medical.yaml +lm_eval/tasks/aclue/aclue_ancient_phonetics.yaml +lm_eval/tasks/aclue/aclue_basic_ancient_chinese.yaml +lm_eval/tasks/aclue/aclue_couplet_prediction.yaml +lm_eval/tasks/aclue/aclue_homographic_character_resolution.yaml 
+lm_eval/tasks/aclue/aclue_named_entity_recognition.yaml +lm_eval/tasks/aclue/aclue_poetry_appreciate.yaml +lm_eval/tasks/aclue/aclue_poetry_context_prediction.yaml +lm_eval/tasks/aclue/aclue_poetry_quality_assessment.yaml +lm_eval/tasks/aclue/aclue_poetry_sentiment_analysis.yaml +lm_eval/tasks/aclue/aclue_polysemy_resolution.yaml +lm_eval/tasks/aclue/aclue_reading_comprehension.yaml +lm_eval/tasks/aclue/aclue_sentence_segmentation.yaml +lm_eval/tasks/aexams/README.md +lm_eval/tasks/aexams/_default_template_yaml +lm_eval/tasks/aexams/aexams_Biology.yaml +lm_eval/tasks/aexams/aexams_IslamicStudies.yaml +lm_eval/tasks/aexams/aexams_Physics.yaml +lm_eval/tasks/aexams/aexams_Science.yaml +lm_eval/tasks/aexams/aexams_Social.yaml +lm_eval/tasks/agieval/README.md +lm_eval/tasks/agieval/aqua-rat.yaml +lm_eval/tasks/agieval/gaokao-biology.yaml +lm_eval/tasks/agieval/gaokao-chemistry.yaml +lm_eval/tasks/agieval/gaokao-chinese.yaml +lm_eval/tasks/agieval/gaokao-english.yaml +lm_eval/tasks/agieval/gaokao-geography.yaml +lm_eval/tasks/agieval/gaokao-history.yaml +lm_eval/tasks/agieval/gaokao-mathcloze.yaml +lm_eval/tasks/agieval/gaokao-mathqa.yaml +lm_eval/tasks/agieval/gaokao-physics.yaml +lm_eval/tasks/agieval/jec-qa-ca.yaml +lm_eval/tasks/agieval/jec-qa-kd.yaml +lm_eval/tasks/agieval/logiqa-en.yaml +lm_eval/tasks/agieval/logiqa-zh.yaml +lm_eval/tasks/agieval/lsat-ar.yaml +lm_eval/tasks/agieval/lsat-lr.yaml +lm_eval/tasks/agieval/lsat-rc.yaml +lm_eval/tasks/agieval/math.yaml +lm_eval/tasks/agieval/sat-en-without-passage.yaml +lm_eval/tasks/agieval/sat-en.yaml +lm_eval/tasks/agieval/sat-math.yaml +lm_eval/tasks/agieval/utils.py +lm_eval/tasks/ammlu/README.md +lm_eval/tasks/ammlu/_default_template_yaml +lm_eval/tasks/ammlu/_generate_configs.py +lm_eval/tasks/ammlu/ammlu_abstract_algebra.yaml +lm_eval/tasks/ammlu/ammlu_anatomy.yaml +lm_eval/tasks/ammlu/ammlu_astronomy.yaml +lm_eval/tasks/ammlu/ammlu_business_ethics.yaml +lm_eval/tasks/ammlu/ammlu_clinical_knowledge.yaml 
+lm_eval/tasks/ammlu/ammlu_college_biology.yaml +lm_eval/tasks/ammlu/ammlu_college_chemistry.yaml +lm_eval/tasks/ammlu/ammlu_college_computer_science.yaml +lm_eval/tasks/ammlu/ammlu_college_mathematics.yaml +lm_eval/tasks/ammlu/ammlu_college_medicine.yaml +lm_eval/tasks/ammlu/ammlu_college_physics.yaml +lm_eval/tasks/ammlu/ammlu_computer_security.yaml +lm_eval/tasks/ammlu/ammlu_conceptual_physics.yaml +lm_eval/tasks/ammlu/ammlu_econometrics.yaml +lm_eval/tasks/ammlu/ammlu_electrical_engineering.yaml +lm_eval/tasks/ammlu/ammlu_elementary_mathematics.yaml +lm_eval/tasks/ammlu/ammlu_formal_logic.yaml +lm_eval/tasks/ammlu/ammlu_global_facts.yaml +lm_eval/tasks/ammlu/ammlu_high_school_biology.yaml +lm_eval/tasks/ammlu/ammlu_high_school_chemistry.yaml +lm_eval/tasks/ammlu/ammlu_high_school_computer_science.yaml +lm_eval/tasks/ammlu/ammlu_high_school_european_history.yaml +lm_eval/tasks/ammlu/ammlu_high_school_geography.yaml +lm_eval/tasks/ammlu/ammlu_high_school_government_and_politics.yaml +lm_eval/tasks/ammlu/ammlu_high_school_macroeconomics.yaml +lm_eval/tasks/ammlu/ammlu_high_school_mathematics.yaml +lm_eval/tasks/ammlu/ammlu_high_school_microeconomics.yaml +lm_eval/tasks/ammlu/ammlu_high_school_physics.yaml +lm_eval/tasks/ammlu/ammlu_high_school_psychology.yaml +lm_eval/tasks/ammlu/ammlu_high_school_statistics.yaml +lm_eval/tasks/ammlu/ammlu_high_school_us_history.yaml +lm_eval/tasks/ammlu/ammlu_high_school_world_history.yaml +lm_eval/tasks/ammlu/ammlu_human_aging.yaml +lm_eval/tasks/ammlu/ammlu_human_sexuality.yaml +lm_eval/tasks/ammlu/ammlu_international_law.yaml +lm_eval/tasks/ammlu/ammlu_jurisprudence.yaml +lm_eval/tasks/ammlu/ammlu_logical_fallacies.yaml +lm_eval/tasks/ammlu/ammlu_machine_learning.yaml +lm_eval/tasks/ammlu/ammlu_management.yaml +lm_eval/tasks/ammlu/ammlu_marketing.yaml +lm_eval/tasks/ammlu/ammlu_medical_genetics.yaml +lm_eval/tasks/ammlu/ammlu_miscellaneous.yaml +lm_eval/tasks/ammlu/ammlu_moral_disputes.yaml 
+lm_eval/tasks/ammlu/ammlu_moral_scenarios.yaml +lm_eval/tasks/ammlu/ammlu_nutrition.yaml +lm_eval/tasks/ammlu/ammlu_philosophy.yaml +lm_eval/tasks/ammlu/ammlu_prehistory.yaml +lm_eval/tasks/ammlu/ammlu_professional_accounting.yaml +lm_eval/tasks/ammlu/ammlu_professional_law.yaml +lm_eval/tasks/ammlu/ammlu_professional_medicine.yaml +lm_eval/tasks/ammlu/ammlu_professional_psychology.yaml +lm_eval/tasks/ammlu/ammlu_public_relations.yaml +lm_eval/tasks/ammlu/ammlu_security_studies.yaml +lm_eval/tasks/ammlu/ammlu_sociology.yaml +lm_eval/tasks/ammlu/ammlu_us_foreign_policy.yaml +lm_eval/tasks/ammlu/ammlu_virology.yaml +lm_eval/tasks/ammlu/ammlu_world_religions.yaml +lm_eval/tasks/anli/README.md +lm_eval/tasks/anli/anli_r1.yaml +lm_eval/tasks/anli/anli_r2.yaml +lm_eval/tasks/anli/anli_r3.yaml +lm_eval/tasks/arc/README.md +lm_eval/tasks/arc/arc_challenge.yaml +lm_eval/tasks/arc/arc_easy.yaml +lm_eval/tasks/arithmetic/README.md +lm_eval/tasks/arithmetic/arithmetic_1dc.yaml +lm_eval/tasks/arithmetic/arithmetic_2da.yaml +lm_eval/tasks/arithmetic/arithmetic_2dm.yaml +lm_eval/tasks/arithmetic/arithmetic_2ds.yaml +lm_eval/tasks/arithmetic/arithmetic_3da.yaml +lm_eval/tasks/arithmetic/arithmetic_3ds.yaml +lm_eval/tasks/arithmetic/arithmetic_4da.yaml +lm_eval/tasks/arithmetic/arithmetic_4ds.yaml +lm_eval/tasks/arithmetic/arithmetic_5da.yaml +lm_eval/tasks/arithmetic/arithmetic_5ds.yaml +lm_eval/tasks/asdiv/README.md +lm_eval/tasks/asdiv/default.yaml +lm_eval/tasks/babi/README.md +lm_eval/tasks/babi/babi.yaml +lm_eval/tasks/basqueglue/README.md +lm_eval/tasks/basqueglue/bec.yaml +lm_eval/tasks/basqueglue/bhtc.yaml +lm_eval/tasks/basqueglue/coref.yaml +lm_eval/tasks/basqueglue/qnli.yaml +lm_eval/tasks/basqueglue/utils.py +lm_eval/tasks/basqueglue/vaxx.yaml +lm_eval/tasks/basqueglue/wic.yaml +lm_eval/tasks/bbh/README.md +lm_eval/tasks/bbh/_generate_configs.py +lm_eval/tasks/bbh/cot_fewshot/_cot_fewshot_template_yaml +lm_eval/tasks/bbh/cot_fewshot/boolean_expressions.yaml 
+lm_eval/tasks/bbh/cot_fewshot/causal_judgement.yaml +lm_eval/tasks/bbh/cot_fewshot/date_understanding.yaml +lm_eval/tasks/bbh/cot_fewshot/disambiguation_qa.yaml +lm_eval/tasks/bbh/cot_fewshot/dyck_languages.yaml +lm_eval/tasks/bbh/cot_fewshot/formal_fallacies.yaml +lm_eval/tasks/bbh/cot_fewshot/geometric_shapes.yaml +lm_eval/tasks/bbh/cot_fewshot/hyperbaton.yaml +lm_eval/tasks/bbh/cot_fewshot/logical_deduction_five_objects.yaml +lm_eval/tasks/bbh/cot_fewshot/logical_deduction_seven_objects.yaml +lm_eval/tasks/bbh/cot_fewshot/logical_deduction_three_objects.yaml +lm_eval/tasks/bbh/cot_fewshot/movie_recommendation.yaml +lm_eval/tasks/bbh/cot_fewshot/multistep_arithmetic_two.yaml +lm_eval/tasks/bbh/cot_fewshot/navigate.yaml +lm_eval/tasks/bbh/cot_fewshot/object_counting.yaml +lm_eval/tasks/bbh/cot_fewshot/penguins_in_a_table.yaml +lm_eval/tasks/bbh/cot_fewshot/reasoning_about_colored_objects.yaml +lm_eval/tasks/bbh/cot_fewshot/ruin_names.yaml +lm_eval/tasks/bbh/cot_fewshot/salient_translation_error_detection.yaml +lm_eval/tasks/bbh/cot_fewshot/snarks.yaml +lm_eval/tasks/bbh/cot_fewshot/sports_understanding.yaml +lm_eval/tasks/bbh/cot_fewshot/temporal_sequences.yaml +lm_eval/tasks/bbh/cot_fewshot/tracking_shuffled_objects_five_objects.yaml +lm_eval/tasks/bbh/cot_fewshot/tracking_shuffled_objects_seven_objects.yaml +lm_eval/tasks/bbh/cot_fewshot/tracking_shuffled_objects_three_objects.yaml +lm_eval/tasks/bbh/cot_fewshot/web_of_lies.yaml +lm_eval/tasks/bbh/cot_fewshot/word_sorting.yaml +lm_eval/tasks/bbh/cot_zeroshot/_cot_zeroshot_template_yaml +lm_eval/tasks/bbh/cot_zeroshot/boolean_expressions.yaml +lm_eval/tasks/bbh/cot_zeroshot/causal_judgement.yaml +lm_eval/tasks/bbh/cot_zeroshot/date_understanding.yaml +lm_eval/tasks/bbh/cot_zeroshot/disambiguation_qa.yaml +lm_eval/tasks/bbh/cot_zeroshot/dyck_languages.yaml +lm_eval/tasks/bbh/cot_zeroshot/formal_fallacies.yaml +lm_eval/tasks/bbh/cot_zeroshot/geometric_shapes.yaml +lm_eval/tasks/bbh/cot_zeroshot/hyperbaton.yaml 
+lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_five_objects.yaml +lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_seven_objects.yaml +lm_eval/tasks/bbh/cot_zeroshot/logical_deduction_three_objects.yaml +lm_eval/tasks/bbh/cot_zeroshot/movie_recommendation.yaml +lm_eval/tasks/bbh/cot_zeroshot/multistep_arithmetic_two.yaml +lm_eval/tasks/bbh/cot_zeroshot/navigate.yaml +lm_eval/tasks/bbh/cot_zeroshot/object_counting.yaml +lm_eval/tasks/bbh/cot_zeroshot/penguins_in_a_table.yaml +lm_eval/tasks/bbh/cot_zeroshot/reasoning_about_colored_objects.yaml +lm_eval/tasks/bbh/cot_zeroshot/ruin_names.yaml +lm_eval/tasks/bbh/cot_zeroshot/salient_translation_error_detection.yaml +lm_eval/tasks/bbh/cot_zeroshot/snarks.yaml +lm_eval/tasks/bbh/cot_zeroshot/sports_understanding.yaml +lm_eval/tasks/bbh/cot_zeroshot/temporal_sequences.yaml +lm_eval/tasks/bbh/cot_zeroshot/tracking_shuffled_objects_five_objects.yaml +lm_eval/tasks/bbh/cot_zeroshot/tracking_shuffled_objects_seven_objects.yaml +lm_eval/tasks/bbh/cot_zeroshot/tracking_shuffled_objects_three_objects.yaml +lm_eval/tasks/bbh/cot_zeroshot/utils.py +lm_eval/tasks/bbh/cot_zeroshot/web_of_lies.yaml +lm_eval/tasks/bbh/cot_zeroshot/word_sorting.yaml +lm_eval/tasks/bbh/fewshot/_fewshot_template_yaml +lm_eval/tasks/bbh/fewshot/boolean_expressions.yaml +lm_eval/tasks/bbh/fewshot/causal_judgement.yaml +lm_eval/tasks/bbh/fewshot/date_understanding.yaml +lm_eval/tasks/bbh/fewshot/disambiguation_qa.yaml +lm_eval/tasks/bbh/fewshot/dyck_languages.yaml +lm_eval/tasks/bbh/fewshot/formal_fallacies.yaml +lm_eval/tasks/bbh/fewshot/geometric_shapes.yaml +lm_eval/tasks/bbh/fewshot/hyperbaton.yaml +lm_eval/tasks/bbh/fewshot/logical_deduction_five_objects.yaml +lm_eval/tasks/bbh/fewshot/logical_deduction_seven_objects.yaml +lm_eval/tasks/bbh/fewshot/logical_deduction_three_objects.yaml +lm_eval/tasks/bbh/fewshot/movie_recommendation.yaml +lm_eval/tasks/bbh/fewshot/multistep_arithmetic_two.yaml +lm_eval/tasks/bbh/fewshot/navigate.yaml 
+lm_eval/tasks/bbh/fewshot/object_counting.yaml +lm_eval/tasks/bbh/fewshot/penguins_in_a_table.yaml +lm_eval/tasks/bbh/fewshot/reasoning_about_colored_objects.yaml +lm_eval/tasks/bbh/fewshot/ruin_names.yaml +lm_eval/tasks/bbh/fewshot/salient_translation_error_detection.yaml +lm_eval/tasks/bbh/fewshot/snarks.yaml +lm_eval/tasks/bbh/fewshot/sports_understanding.yaml +lm_eval/tasks/bbh/fewshot/temporal_sequences.yaml +lm_eval/tasks/bbh/fewshot/tracking_shuffled_objects_five_objects.yaml +lm_eval/tasks/bbh/fewshot/tracking_shuffled_objects_seven_objects.yaml +lm_eval/tasks/bbh/fewshot/tracking_shuffled_objects_three_objects.yaml +lm_eval/tasks/bbh/fewshot/web_of_lies.yaml +lm_eval/tasks/bbh/fewshot/word_sorting.yaml +lm_eval/tasks/bbh/zeroshot/_zeroshot_template_yaml +lm_eval/tasks/bbh/zeroshot/boolean_expressions.yaml +lm_eval/tasks/bbh/zeroshot/causal_judgement.yaml +lm_eval/tasks/bbh/zeroshot/date_understanding.yaml +lm_eval/tasks/bbh/zeroshot/disambiguation_qa.yaml +lm_eval/tasks/bbh/zeroshot/dyck_languages.yaml +lm_eval/tasks/bbh/zeroshot/formal_fallacies.yaml +lm_eval/tasks/bbh/zeroshot/geometric_shapes.yaml +lm_eval/tasks/bbh/zeroshot/hyperbaton.yaml +lm_eval/tasks/bbh/zeroshot/logical_deduction_five_objects.yaml +lm_eval/tasks/bbh/zeroshot/logical_deduction_seven_objects.yaml +lm_eval/tasks/bbh/zeroshot/logical_deduction_three_objects.yaml +lm_eval/tasks/bbh/zeroshot/movie_recommendation.yaml +lm_eval/tasks/bbh/zeroshot/multistep_arithmetic_two.yaml +lm_eval/tasks/bbh/zeroshot/navigate.yaml +lm_eval/tasks/bbh/zeroshot/object_counting.yaml +lm_eval/tasks/bbh/zeroshot/penguins_in_a_table.yaml +lm_eval/tasks/bbh/zeroshot/reasoning_about_colored_objects.yaml +lm_eval/tasks/bbh/zeroshot/ruin_names.yaml +lm_eval/tasks/bbh/zeroshot/salient_translation_error_detection.yaml +lm_eval/tasks/bbh/zeroshot/snarks.yaml +lm_eval/tasks/bbh/zeroshot/sports_understanding.yaml +lm_eval/tasks/bbh/zeroshot/temporal_sequences.yaml 
+lm_eval/tasks/bbh/zeroshot/tracking_shuffled_objects_five_objects.yaml +lm_eval/tasks/bbh/zeroshot/tracking_shuffled_objects_seven_objects.yaml +lm_eval/tasks/bbh/zeroshot/tracking_shuffled_objects_three_objects.yaml +lm_eval/tasks/bbh/zeroshot/utils.py +lm_eval/tasks/bbh/zeroshot/web_of_lies.yaml +lm_eval/tasks/bbh/zeroshot/word_sorting.yaml +lm_eval/tasks/belebele/README.md +lm_eval/tasks/belebele/_default_template_yaml +lm_eval/tasks/belebele/_generate_configs.py +lm_eval/tasks/belebele/belebele_acm_Arab.yaml +lm_eval/tasks/belebele/belebele_afr_Latn.yaml +lm_eval/tasks/belebele/belebele_als_Latn.yaml +lm_eval/tasks/belebele/belebele_amh_Ethi.yaml +lm_eval/tasks/belebele/belebele_apc_Arab.yaml +lm_eval/tasks/belebele/belebele_arb_Arab.yaml +lm_eval/tasks/belebele/belebele_arb_Latn.yaml +lm_eval/tasks/belebele/belebele_ars_Arab.yaml +lm_eval/tasks/belebele/belebele_ary_Arab.yaml +lm_eval/tasks/belebele/belebele_arz_Arab.yaml +lm_eval/tasks/belebele/belebele_asm_Beng.yaml +lm_eval/tasks/belebele/belebele_azj_Latn.yaml +lm_eval/tasks/belebele/belebele_bam_Latn.yaml +lm_eval/tasks/belebele/belebele_ben_Beng.yaml +lm_eval/tasks/belebele/belebele_ben_Latn.yaml +lm_eval/tasks/belebele/belebele_bod_Tibt.yaml +lm_eval/tasks/belebele/belebele_bul_Cyrl.yaml +lm_eval/tasks/belebele/belebele_cat_Latn.yaml +lm_eval/tasks/belebele/belebele_ceb_Latn.yaml +lm_eval/tasks/belebele/belebele_ces_Latn.yaml +lm_eval/tasks/belebele/belebele_ckb_Arab.yaml +lm_eval/tasks/belebele/belebele_dan_Latn.yaml +lm_eval/tasks/belebele/belebele_deu_Latn.yaml +lm_eval/tasks/belebele/belebele_ell_Grek.yaml +lm_eval/tasks/belebele/belebele_eng_Latn.yaml +lm_eval/tasks/belebele/belebele_est_Latn.yaml +lm_eval/tasks/belebele/belebele_eus_Latn.yaml +lm_eval/tasks/belebele/belebele_fin_Latn.yaml +lm_eval/tasks/belebele/belebele_fra_Latn.yaml +lm_eval/tasks/belebele/belebele_fuv_Latn.yaml +lm_eval/tasks/belebele/belebele_gaz_Latn.yaml +lm_eval/tasks/belebele/belebele_grn_Latn.yaml 
+lm_eval/tasks/belebele/belebele_guj_Gujr.yaml +lm_eval/tasks/belebele/belebele_hat_Latn.yaml +lm_eval/tasks/belebele/belebele_hau_Latn.yaml +lm_eval/tasks/belebele/belebele_heb_Hebr.yaml +lm_eval/tasks/belebele/belebele_hin_Deva.yaml +lm_eval/tasks/belebele/belebele_hin_Latn.yaml +lm_eval/tasks/belebele/belebele_hrv_Latn.yaml +lm_eval/tasks/belebele/belebele_hun_Latn.yaml +lm_eval/tasks/belebele/belebele_hye_Armn.yaml +lm_eval/tasks/belebele/belebele_ibo_Latn.yaml +lm_eval/tasks/belebele/belebele_ilo_Latn.yaml +lm_eval/tasks/belebele/belebele_ind_Latn.yaml +lm_eval/tasks/belebele/belebele_isl_Latn.yaml +lm_eval/tasks/belebele/belebele_ita_Latn.yaml +lm_eval/tasks/belebele/belebele_jav_Latn.yaml +lm_eval/tasks/belebele/belebele_jpn_Jpan.yaml +lm_eval/tasks/belebele/belebele_kac_Latn.yaml +lm_eval/tasks/belebele/belebele_kan_Knda.yaml +lm_eval/tasks/belebele/belebele_kat_Geor.yaml +lm_eval/tasks/belebele/belebele_kaz_Cyrl.yaml +lm_eval/tasks/belebele/belebele_kea_Latn.yaml +lm_eval/tasks/belebele/belebele_khk_Cyrl.yaml +lm_eval/tasks/belebele/belebele_khm_Khmr.yaml +lm_eval/tasks/belebele/belebele_kin_Latn.yaml +lm_eval/tasks/belebele/belebele_kir_Cyrl.yaml +lm_eval/tasks/belebele/belebele_kor_Hang.yaml +lm_eval/tasks/belebele/belebele_lao_Laoo.yaml +lm_eval/tasks/belebele/belebele_lin_Latn.yaml +lm_eval/tasks/belebele/belebele_lit_Latn.yaml +lm_eval/tasks/belebele/belebele_lug_Latn.yaml +lm_eval/tasks/belebele/belebele_luo_Latn.yaml +lm_eval/tasks/belebele/belebele_lvs_Latn.yaml +lm_eval/tasks/belebele/belebele_mal_Mlym.yaml +lm_eval/tasks/belebele/belebele_mar_Deva.yaml +lm_eval/tasks/belebele/belebele_mkd_Cyrl.yaml +lm_eval/tasks/belebele/belebele_mlt_Latn.yaml +lm_eval/tasks/belebele/belebele_mri_Latn.yaml +lm_eval/tasks/belebele/belebele_mya_Mymr.yaml +lm_eval/tasks/belebele/belebele_nld_Latn.yaml +lm_eval/tasks/belebele/belebele_nob_Latn.yaml +lm_eval/tasks/belebele/belebele_npi_Deva.yaml +lm_eval/tasks/belebele/belebele_npi_Latn.yaml 
+lm_eval/tasks/belebele/belebele_nso_Latn.yaml +lm_eval/tasks/belebele/belebele_nya_Latn.yaml +lm_eval/tasks/belebele/belebele_ory_Orya.yaml +lm_eval/tasks/belebele/belebele_pan_Guru.yaml +lm_eval/tasks/belebele/belebele_pbt_Arab.yaml +lm_eval/tasks/belebele/belebele_pes_Arab.yaml +lm_eval/tasks/belebele/belebele_plt_Latn.yaml +lm_eval/tasks/belebele/belebele_pol_Latn.yaml +lm_eval/tasks/belebele/belebele_por_Latn.yaml +lm_eval/tasks/belebele/belebele_ron_Latn.yaml +lm_eval/tasks/belebele/belebele_rus_Cyrl.yaml +lm_eval/tasks/belebele/belebele_shn_Mymr.yaml +lm_eval/tasks/belebele/belebele_sin_Latn.yaml +lm_eval/tasks/belebele/belebele_sin_Sinh.yaml +lm_eval/tasks/belebele/belebele_slk_Latn.yaml +lm_eval/tasks/belebele/belebele_slv_Latn.yaml +lm_eval/tasks/belebele/belebele_sna_Latn.yaml +lm_eval/tasks/belebele/belebele_snd_Arab.yaml +lm_eval/tasks/belebele/belebele_som_Latn.yaml +lm_eval/tasks/belebele/belebele_sot_Latn.yaml +lm_eval/tasks/belebele/belebele_spa_Latn.yaml +lm_eval/tasks/belebele/belebele_srp_Cyrl.yaml +lm_eval/tasks/belebele/belebele_ssw_Latn.yaml +lm_eval/tasks/belebele/belebele_sun_Latn.yaml +lm_eval/tasks/belebele/belebele_swe_Latn.yaml +lm_eval/tasks/belebele/belebele_swh_Latn.yaml +lm_eval/tasks/belebele/belebele_tam_Taml.yaml +lm_eval/tasks/belebele/belebele_tel_Telu.yaml +lm_eval/tasks/belebele/belebele_tgk_Cyrl.yaml +lm_eval/tasks/belebele/belebele_tgl_Latn.yaml +lm_eval/tasks/belebele/belebele_tha_Thai.yaml +lm_eval/tasks/belebele/belebele_tir_Ethi.yaml +lm_eval/tasks/belebele/belebele_tsn_Latn.yaml +lm_eval/tasks/belebele/belebele_tso_Latn.yaml +lm_eval/tasks/belebele/belebele_tur_Latn.yaml +lm_eval/tasks/belebele/belebele_ukr_Cyrl.yaml +lm_eval/tasks/belebele/belebele_urd_Arab.yaml +lm_eval/tasks/belebele/belebele_urd_Latn.yaml +lm_eval/tasks/belebele/belebele_uzn_Latn.yaml +lm_eval/tasks/belebele/belebele_vie_Latn.yaml +lm_eval/tasks/belebele/belebele_war_Latn.yaml +lm_eval/tasks/belebele/belebele_wol_Latn.yaml 
+lm_eval/tasks/belebele/belebele_xho_Latn.yaml +lm_eval/tasks/belebele/belebele_yor_Latn.yaml +lm_eval/tasks/belebele/belebele_zho_Hans.yaml +lm_eval/tasks/belebele/belebele_zho_Hant.yaml +lm_eval/tasks/belebele/belebele_zsm_Latn.yaml +lm_eval/tasks/belebele/belebele_zul_Latn.yaml +lm_eval/tasks/benchmarks/minerva_math.yaml +lm_eval/tasks/benchmarks/openllm.yaml +lm_eval/tasks/benchmarks/pythia.yaml +lm_eval/tasks/benchmarks/t0_eval.yaml +lm_eval/tasks/benchmarks/flan/_held_in_template_yaml +lm_eval/tasks/benchmarks/flan/flan_held_in.yaml +lm_eval/tasks/benchmarks/flan/flan_held_out.yaml +lm_eval/tasks/benchmarks/multimedqa/README.md +lm_eval/tasks/benchmarks/multimedqa/multimedqa.yaml +lm_eval/tasks/bigbench/README.md +lm_eval/tasks/bigbench/generate_tasks.py +lm_eval/tasks/bigbench/generate_until_template_yaml +lm_eval/tasks/bigbench/multiple_choice_template_yaml +lm_eval/tasks/bigbench/push_bigbench_dataset.py +lm_eval/tasks/bigbench/generate_until/abstract_narrative_understanding.yaml +lm_eval/tasks/bigbench/generate_until/anachronisms.yaml +lm_eval/tasks/bigbench/generate_until/analogical_similarity.yaml +lm_eval/tasks/bigbench/generate_until/analytic_entailment.yaml +lm_eval/tasks/bigbench/generate_until/arithmetic.yaml +lm_eval/tasks/bigbench/generate_until/ascii_word_recognition.yaml +lm_eval/tasks/bigbench/generate_until/authorship_verification.yaml +lm_eval/tasks/bigbench/generate_until/auto_categorization.yaml +lm_eval/tasks/bigbench/generate_until/auto_debugging.yaml +lm_eval/tasks/bigbench/generate_until/bbq_lite_json.yaml +lm_eval/tasks/bigbench/generate_until/bridging_anaphora_resolution_barqa.yaml +lm_eval/tasks/bigbench/generate_until/causal_judgment.yaml +lm_eval/tasks/bigbench/generate_until/cause_and_effect.yaml +lm_eval/tasks/bigbench/generate_until/checkmate_in_one.yaml +lm_eval/tasks/bigbench/generate_until/chess_state_tracking.yaml +lm_eval/tasks/bigbench/generate_until/chinese_remainder_theorem.yaml 
+lm_eval/tasks/bigbench/generate_until/cifar10_classification.yaml +lm_eval/tasks/bigbench/generate_until/code_line_description.yaml +lm_eval/tasks/bigbench/generate_until/codenames.yaml +lm_eval/tasks/bigbench/generate_until/color.yaml +lm_eval/tasks/bigbench/generate_until/common_morpheme.yaml +lm_eval/tasks/bigbench/generate_until/conceptual_combinations.yaml +lm_eval/tasks/bigbench/generate_until/conlang_translation.yaml +lm_eval/tasks/bigbench/generate_until/contextual_parametric_knowledge_conflicts.yaml +lm_eval/tasks/bigbench/generate_until/crash_blossom.yaml +lm_eval/tasks/bigbench/generate_until/crass_ai.yaml +lm_eval/tasks/bigbench/generate_until/cryobiology_spanish.yaml +lm_eval/tasks/bigbench/generate_until/cryptonite.yaml +lm_eval/tasks/bigbench/generate_until/cs_algorithms.yaml +lm_eval/tasks/bigbench/generate_until/dark_humor_detection.yaml +lm_eval/tasks/bigbench/generate_until/date_understanding.yaml +lm_eval/tasks/bigbench/generate_until/disambiguation_qa.yaml +lm_eval/tasks/bigbench/generate_until/discourse_marker_prediction.yaml +lm_eval/tasks/bigbench/generate_until/disfl_qa.yaml +lm_eval/tasks/bigbench/generate_until/dyck_languages.yaml +lm_eval/tasks/bigbench/generate_until/elementary_math_qa.yaml +lm_eval/tasks/bigbench/generate_until/emoji_movie.yaml +lm_eval/tasks/bigbench/generate_until/emojis_emotion_prediction.yaml +lm_eval/tasks/bigbench/generate_until/empirical_judgments.yaml +lm_eval/tasks/bigbench/generate_until/english_proverbs.yaml +lm_eval/tasks/bigbench/generate_until/english_russian_proverbs.yaml +lm_eval/tasks/bigbench/generate_until/entailed_polarity.yaml +lm_eval/tasks/bigbench/generate_until/entailed_polarity_hindi.yaml +lm_eval/tasks/bigbench/generate_until/epistemic_reasoning.yaml +lm_eval/tasks/bigbench/generate_until/evaluating_information_essentiality.yaml +lm_eval/tasks/bigbench/generate_until/fact_checker.yaml +lm_eval/tasks/bigbench/generate_until/fantasy_reasoning.yaml 
+lm_eval/tasks/bigbench/generate_until/few_shot_nlg.yaml +lm_eval/tasks/bigbench/generate_until/figure_of_speech_detection.yaml +lm_eval/tasks/bigbench/generate_until/formal_fallacies_syllogisms_negation.yaml +lm_eval/tasks/bigbench/generate_until/gem.yaml +lm_eval/tasks/bigbench/generate_until/gender_inclusive_sentences_german.yaml +lm_eval/tasks/bigbench/generate_until/general_knowledge.yaml +lm_eval/tasks/bigbench/generate_until/geometric_shapes.yaml +lm_eval/tasks/bigbench/generate_until/goal_step_wikihow.yaml +lm_eval/tasks/bigbench/generate_until/gre_reading_comprehension.yaml +lm_eval/tasks/bigbench/generate_until/hhh_alignment.yaml +lm_eval/tasks/bigbench/generate_until/hindi_question_answering.yaml +lm_eval/tasks/bigbench/generate_until/hindu_knowledge.yaml +lm_eval/tasks/bigbench/generate_until/hinglish_toxicity.yaml +lm_eval/tasks/bigbench/generate_until/human_organs_senses.yaml +lm_eval/tasks/bigbench/generate_until/hyperbaton.yaml +lm_eval/tasks/bigbench/generate_until/identify_math_theorems.yaml +lm_eval/tasks/bigbench/generate_until/identify_odd_metaphor.yaml +lm_eval/tasks/bigbench/generate_until/implicatures.yaml +lm_eval/tasks/bigbench/generate_until/implicit_relations.yaml +lm_eval/tasks/bigbench/generate_until/intent_recognition.yaml +lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_nli.yaml +lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_transliterate.yaml +lm_eval/tasks/bigbench/generate_until/intersect_geometry.yaml +lm_eval/tasks/bigbench/generate_until/irony_identification.yaml +lm_eval/tasks/bigbench/generate_until/kanji_ascii.yaml +lm_eval/tasks/bigbench/generate_until/kannada.yaml +lm_eval/tasks/bigbench/generate_until/key_value_maps.yaml +lm_eval/tasks/bigbench/generate_until/known_unknowns.yaml +lm_eval/tasks/bigbench/generate_until/language_games.yaml +lm_eval/tasks/bigbench/generate_until/language_identification.yaml +lm_eval/tasks/bigbench/generate_until/linguistic_mappings.yaml 
+lm_eval/tasks/bigbench/generate_until/linguistics_puzzles.yaml +lm_eval/tasks/bigbench/generate_until/list_functions.yaml +lm_eval/tasks/bigbench/generate_until/logic_grid_puzzle.yaml +lm_eval/tasks/bigbench/generate_until/logical_args.yaml +lm_eval/tasks/bigbench/generate_until/logical_deduction.yaml +lm_eval/tasks/bigbench/generate_until/logical_fallacy_detection.yaml +lm_eval/tasks/bigbench/generate_until/logical_sequence.yaml +lm_eval/tasks/bigbench/generate_until/mathematical_induction.yaml +lm_eval/tasks/bigbench/generate_until/matrixshapes.yaml +lm_eval/tasks/bigbench/generate_until/metaphor_boolean.yaml +lm_eval/tasks/bigbench/generate_until/metaphor_understanding.yaml +lm_eval/tasks/bigbench/generate_until/minute_mysteries_qa.yaml +lm_eval/tasks/bigbench/generate_until/misconceptions.yaml +lm_eval/tasks/bigbench/generate_until/misconceptions_russian.yaml +lm_eval/tasks/bigbench/generate_until/mnist_ascii.yaml +lm_eval/tasks/bigbench/generate_until/modified_arithmetic.yaml +lm_eval/tasks/bigbench/generate_until/moral_permissibility.yaml +lm_eval/tasks/bigbench/generate_until/movie_dialog_same_or_different.yaml +lm_eval/tasks/bigbench/generate_until/movie_recommendation.yaml +lm_eval/tasks/bigbench/generate_until/mult_data_wrangling.yaml +lm_eval/tasks/bigbench/generate_until/multiemo.yaml +lm_eval/tasks/bigbench/generate_until/natural_instructions.yaml +lm_eval/tasks/bigbench/generate_until/navigate.yaml +lm_eval/tasks/bigbench/generate_until/nonsense_words_grammar.yaml +lm_eval/tasks/bigbench/generate_until/novel_concepts.yaml +lm_eval/tasks/bigbench/generate_until/object_counting.yaml +lm_eval/tasks/bigbench/generate_until/odd_one_out.yaml +lm_eval/tasks/bigbench/generate_until/operators.yaml +lm_eval/tasks/bigbench/generate_until/paragraph_segmentation.yaml +lm_eval/tasks/bigbench/generate_until/parsinlu_qa.yaml +lm_eval/tasks/bigbench/generate_until/parsinlu_reading_comprehension.yaml +lm_eval/tasks/bigbench/generate_until/penguins_in_a_table.yaml 
+lm_eval/tasks/bigbench/generate_until/periodic_elements.yaml +lm_eval/tasks/bigbench/generate_until/persian_idioms.yaml +lm_eval/tasks/bigbench/generate_until/phrase_relatedness.yaml +lm_eval/tasks/bigbench/generate_until/physical_intuition.yaml +lm_eval/tasks/bigbench/generate_until/physics.yaml +lm_eval/tasks/bigbench/generate_until/physics_questions.yaml +lm_eval/tasks/bigbench/generate_until/play_dialog_same_or_different.yaml +lm_eval/tasks/bigbench/generate_until/polish_sequence_labeling.yaml +lm_eval/tasks/bigbench/generate_until/presuppositions_as_nli.yaml +lm_eval/tasks/bigbench/generate_until/qa_wikidata.yaml +lm_eval/tasks/bigbench/generate_until/question_selection.yaml +lm_eval/tasks/bigbench/generate_until/real_or_fake_text.yaml +lm_eval/tasks/bigbench/generate_until/reasoning_about_colored_objects.yaml +lm_eval/tasks/bigbench/generate_until/repeat_copy_logic.yaml +lm_eval/tasks/bigbench/generate_until/rephrase.yaml +lm_eval/tasks/bigbench/generate_until/riddle_sense.yaml +lm_eval/tasks/bigbench/generate_until/ruin_names.yaml +lm_eval/tasks/bigbench/generate_until/salient_translation_error_detection.yaml +lm_eval/tasks/bigbench/generate_until/scientific_press_release.yaml +lm_eval/tasks/bigbench/generate_until/semantic_parsing_in_context_sparc.yaml +lm_eval/tasks/bigbench/generate_until/semantic_parsing_spider.yaml +lm_eval/tasks/bigbench/generate_until/sentence_ambiguity.yaml +lm_eval/tasks/bigbench/generate_until/similarities_abstraction.yaml +lm_eval/tasks/bigbench/generate_until/simp_turing_concept.yaml +lm_eval/tasks/bigbench/generate_until/simple_arithmetic_json.yaml +lm_eval/tasks/bigbench/generate_until/simple_arithmetic_json_multiple_choice.yaml +lm_eval/tasks/bigbench/generate_until/simple_arithmetic_json_subtasks.yaml +lm_eval/tasks/bigbench/generate_until/simple_arithmetic_multiple_targets_json.yaml +lm_eval/tasks/bigbench/generate_until/simple_ethical_questions.yaml +lm_eval/tasks/bigbench/generate_until/simple_text_editing.yaml 
+lm_eval/tasks/bigbench/generate_until/snarks.yaml +lm_eval/tasks/bigbench/generate_until/social_iqa.yaml +lm_eval/tasks/bigbench/generate_until/social_support.yaml +lm_eval/tasks/bigbench/generate_until/sports_understanding.yaml +lm_eval/tasks/bigbench/generate_until/strange_stories.yaml +lm_eval/tasks/bigbench/generate_until/strategyqa.yaml +lm_eval/tasks/bigbench/generate_until/sufficient_information.yaml +lm_eval/tasks/bigbench/generate_until/suicide_risk.yaml +lm_eval/tasks/bigbench/generate_until/swahili_english_proverbs.yaml +lm_eval/tasks/bigbench/generate_until/swedish_to_german_proverbs.yaml +lm_eval/tasks/bigbench/generate_until/symbol_interpretation.yaml +lm_eval/tasks/bigbench/generate_until/temporal_sequences.yaml +lm_eval/tasks/bigbench/generate_until/tense.yaml +lm_eval/tasks/bigbench/generate_until/timedial.yaml +lm_eval/tasks/bigbench/generate_until/topical_chat.yaml +lm_eval/tasks/bigbench/generate_until/tracking_shuffled_objects.yaml +lm_eval/tasks/bigbench/generate_until/understanding_fables.yaml +lm_eval/tasks/bigbench/generate_until/undo_permutation.yaml +lm_eval/tasks/bigbench/generate_until/unit_conversion.yaml +lm_eval/tasks/bigbench/generate_until/unit_interpretation.yaml +lm_eval/tasks/bigbench/generate_until/unnatural_in_context_learning.yaml +lm_eval/tasks/bigbench/generate_until/vitaminc_fact_verification.yaml +lm_eval/tasks/bigbench/generate_until/what_is_the_tao.yaml +lm_eval/tasks/bigbench/generate_until/which_wiki_edit.yaml +lm_eval/tasks/bigbench/generate_until/winowhy.yaml +lm_eval/tasks/bigbench/generate_until/word_sorting.yaml +lm_eval/tasks/bigbench/generate_until/word_unscrambling.yaml +lm_eval/tasks/bigbench/multiple_choice/abstract_narrative_understanding.yaml +lm_eval/tasks/bigbench/multiple_choice/anachronisms.yaml +lm_eval/tasks/bigbench/multiple_choice/analogical_similarity.yaml +lm_eval/tasks/bigbench/multiple_choice/analytic_entailment.yaml +lm_eval/tasks/bigbench/multiple_choice/arithmetic.yaml 
+lm_eval/tasks/bigbench/multiple_choice/ascii_word_recognition.yaml +lm_eval/tasks/bigbench/multiple_choice/authorship_verification.yaml +lm_eval/tasks/bigbench/multiple_choice/auto_categorization.yaml +lm_eval/tasks/bigbench/multiple_choice/auto_debugging.yaml +lm_eval/tasks/bigbench/multiple_choice/bbq_lite_json.yaml +lm_eval/tasks/bigbench/multiple_choice/bridging_anaphora_resolution_barqa.yaml +lm_eval/tasks/bigbench/multiple_choice/causal_judgement.yaml +lm_eval/tasks/bigbench/multiple_choice/causal_judgment.yaml +lm_eval/tasks/bigbench/multiple_choice/cause_and_effect.yaml +lm_eval/tasks/bigbench/multiple_choice/checkmate_in_one.yaml +lm_eval/tasks/bigbench/multiple_choice/chess_state_tracking.yaml +lm_eval/tasks/bigbench/multiple_choice/chinese_remainder_theorem.yaml +lm_eval/tasks/bigbench/multiple_choice/cifar10_classification.yaml +lm_eval/tasks/bigbench/multiple_choice/code_line_description.yaml +lm_eval/tasks/bigbench/multiple_choice/codenames.yaml +lm_eval/tasks/bigbench/multiple_choice/color.yaml +lm_eval/tasks/bigbench/multiple_choice/common_morpheme.yaml +lm_eval/tasks/bigbench/multiple_choice/conceptual_combinations.yaml +lm_eval/tasks/bigbench/multiple_choice/conlang_translation.yaml +lm_eval/tasks/bigbench/multiple_choice/contextual_parametric_knowledge_conflicts.yaml +lm_eval/tasks/bigbench/multiple_choice/crash_blossom.yaml +lm_eval/tasks/bigbench/multiple_choice/crass_ai.yaml +lm_eval/tasks/bigbench/multiple_choice/cryobiology_spanish.yaml +lm_eval/tasks/bigbench/multiple_choice/cryptonite.yaml +lm_eval/tasks/bigbench/multiple_choice/cs_algorithms.yaml +lm_eval/tasks/bigbench/multiple_choice/dark_humor_detection.yaml +lm_eval/tasks/bigbench/multiple_choice/date_understanding.yaml +lm_eval/tasks/bigbench/multiple_choice/disambiguation_qa.yaml +lm_eval/tasks/bigbench/multiple_choice/discourse_marker_prediction.yaml +lm_eval/tasks/bigbench/multiple_choice/disfl_qa.yaml +lm_eval/tasks/bigbench/multiple_choice/dyck_languages.yaml 
+lm_eval/tasks/bigbench/multiple_choice/elementary_math_qa.yaml +lm_eval/tasks/bigbench/multiple_choice/emoji_movie.yaml +lm_eval/tasks/bigbench/multiple_choice/emojis_emotion_prediction.yaml +lm_eval/tasks/bigbench/multiple_choice/empirical_judgments.yaml +lm_eval/tasks/bigbench/multiple_choice/english_proverbs.yaml +lm_eval/tasks/bigbench/multiple_choice/english_russian_proverbs.yaml +lm_eval/tasks/bigbench/multiple_choice/entailed_polarity.yaml +lm_eval/tasks/bigbench/multiple_choice/entailed_polarity_hindi.yaml +lm_eval/tasks/bigbench/multiple_choice/epistemic_reasoning.yaml +lm_eval/tasks/bigbench/multiple_choice/evaluating_information_essentiality.yaml +lm_eval/tasks/bigbench/multiple_choice/fact_checker.yaml +lm_eval/tasks/bigbench/multiple_choice/fantasy_reasoning.yaml +lm_eval/tasks/bigbench/multiple_choice/few_shot_nlg.yaml +lm_eval/tasks/bigbench/multiple_choice/figure_of_speech_detection.yaml +lm_eval/tasks/bigbench/multiple_choice/formal_fallacies_syllogisms_negation.yaml +lm_eval/tasks/bigbench/multiple_choice/gem.yaml +lm_eval/tasks/bigbench/multiple_choice/gender_inclusive_sentences_german.yaml +lm_eval/tasks/bigbench/multiple_choice/general_knowledge.yaml +lm_eval/tasks/bigbench/multiple_choice/geometric_shapes.yaml +lm_eval/tasks/bigbench/multiple_choice/goal_step_wikihow.yaml +lm_eval/tasks/bigbench/multiple_choice/gre_reading_comprehension.yaml +lm_eval/tasks/bigbench/multiple_choice/hhh_alignment.yaml +lm_eval/tasks/bigbench/multiple_choice/hindi_question_answering.yaml +lm_eval/tasks/bigbench/multiple_choice/hindu_knowledge.yaml +lm_eval/tasks/bigbench/multiple_choice/hinglish_toxicity.yaml +lm_eval/tasks/bigbench/multiple_choice/human_organs_senses.yaml +lm_eval/tasks/bigbench/multiple_choice/hyperbaton.yaml +lm_eval/tasks/bigbench/multiple_choice/identify_math_theorems.yaml +lm_eval/tasks/bigbench/multiple_choice/identify_odd_metaphor.yaml +lm_eval/tasks/bigbench/multiple_choice/implicatures.yaml 
+lm_eval/tasks/bigbench/multiple_choice/implicit_relations.yaml +lm_eval/tasks/bigbench/multiple_choice/intent_recognition.yaml +lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_nli.yaml +lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_transliterate.yaml +lm_eval/tasks/bigbench/multiple_choice/intersect_geometry.yaml +lm_eval/tasks/bigbench/multiple_choice/irony_identification.yaml +lm_eval/tasks/bigbench/multiple_choice/kanji_ascii.yaml +lm_eval/tasks/bigbench/multiple_choice/kannada.yaml +lm_eval/tasks/bigbench/multiple_choice/key_value_maps.yaml +lm_eval/tasks/bigbench/multiple_choice/known_unknowns.yaml +lm_eval/tasks/bigbench/multiple_choice/language_games.yaml +lm_eval/tasks/bigbench/multiple_choice/language_identification.yaml +lm_eval/tasks/bigbench/multiple_choice/linguistic_mappings.yaml +lm_eval/tasks/bigbench/multiple_choice/linguistics_puzzles.yaml +lm_eval/tasks/bigbench/multiple_choice/list_functions.yaml +lm_eval/tasks/bigbench/multiple_choice/logic_grid_puzzle.yaml +lm_eval/tasks/bigbench/multiple_choice/logical_args.yaml +lm_eval/tasks/bigbench/multiple_choice/logical_deduction.yaml +lm_eval/tasks/bigbench/multiple_choice/logical_fallacy_detection.yaml +lm_eval/tasks/bigbench/multiple_choice/logical_sequence.yaml +lm_eval/tasks/bigbench/multiple_choice/mathematical_induction.yaml +lm_eval/tasks/bigbench/multiple_choice/matrixshapes.yaml +lm_eval/tasks/bigbench/multiple_choice/metaphor_boolean.yaml +lm_eval/tasks/bigbench/multiple_choice/metaphor_understanding.yaml +lm_eval/tasks/bigbench/multiple_choice/minute_mysteries_qa.yaml +lm_eval/tasks/bigbench/multiple_choice/misconceptions.yaml +lm_eval/tasks/bigbench/multiple_choice/misconceptions_russian.yaml +lm_eval/tasks/bigbench/multiple_choice/mnist_ascii.yaml +lm_eval/tasks/bigbench/multiple_choice/modified_arithmetic.yaml +lm_eval/tasks/bigbench/multiple_choice/moral_permissibility.yaml 
+lm_eval/tasks/bigbench/multiple_choice/movie_dialog_same_or_different.yaml +lm_eval/tasks/bigbench/multiple_choice/movie_recommendation.yaml +lm_eval/tasks/bigbench/multiple_choice/mult_data_wrangling.yaml +lm_eval/tasks/bigbench/multiple_choice/multiemo.yaml +lm_eval/tasks/bigbench/multiple_choice/natural_instructions.yaml +lm_eval/tasks/bigbench/multiple_choice/navigate.yaml +lm_eval/tasks/bigbench/multiple_choice/nonsense_words_grammar.yaml +lm_eval/tasks/bigbench/multiple_choice/novel_concepts.yaml +lm_eval/tasks/bigbench/multiple_choice/object_counting.yaml +lm_eval/tasks/bigbench/multiple_choice/odd_one_out.yaml +lm_eval/tasks/bigbench/multiple_choice/operators.yaml +lm_eval/tasks/bigbench/multiple_choice/paragraph_segmentation.yaml +lm_eval/tasks/bigbench/multiple_choice/parsinlu_qa.yaml +lm_eval/tasks/bigbench/multiple_choice/parsinlu_reading_comprehension.yaml +lm_eval/tasks/bigbench/multiple_choice/penguins_in_a_table.yaml +lm_eval/tasks/bigbench/multiple_choice/periodic_elements.yaml +lm_eval/tasks/bigbench/multiple_choice/persian_idioms.yaml +lm_eval/tasks/bigbench/multiple_choice/phrase_relatedness.yaml +lm_eval/tasks/bigbench/multiple_choice/physical_intuition.yaml +lm_eval/tasks/bigbench/multiple_choice/physics.yaml +lm_eval/tasks/bigbench/multiple_choice/physics_questions.yaml +lm_eval/tasks/bigbench/multiple_choice/play_dialog_same_or_different.yaml +lm_eval/tasks/bigbench/multiple_choice/polish_sequence_labeling.yaml +lm_eval/tasks/bigbench/multiple_choice/presuppositions_as_nli.yaml +lm_eval/tasks/bigbench/multiple_choice/qa_wikidata.yaml +lm_eval/tasks/bigbench/multiple_choice/question_selection.yaml +lm_eval/tasks/bigbench/multiple_choice/real_or_fake_text.yaml +lm_eval/tasks/bigbench/multiple_choice/reasoning_about_colored_objects.yaml +lm_eval/tasks/bigbench/multiple_choice/repeat_copy_logic.yaml +lm_eval/tasks/bigbench/multiple_choice/rephrase.yaml +lm_eval/tasks/bigbench/multiple_choice/riddle_sense.yaml 
+lm_eval/tasks/bigbench/multiple_choice/ruin_names.yaml +lm_eval/tasks/bigbench/multiple_choice/salient_translation_error_detection.yaml +lm_eval/tasks/bigbench/multiple_choice/scientific_press_release.yaml +lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_in_context_sparc.yaml +lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_spider.yaml +lm_eval/tasks/bigbench/multiple_choice/sentence_ambiguity.yaml +lm_eval/tasks/bigbench/multiple_choice/similarities_abstraction.yaml +lm_eval/tasks/bigbench/multiple_choice/simp_turing_concept.yaml +lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json.yaml +lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_multiple_choice.yaml +lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_subtasks.yaml +lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_multiple_targets_json.yaml +lm_eval/tasks/bigbench/multiple_choice/simple_ethical_questions.yaml +lm_eval/tasks/bigbench/multiple_choice/simple_text_editing.yaml +lm_eval/tasks/bigbench/multiple_choice/snarks.yaml +lm_eval/tasks/bigbench/multiple_choice/social_iqa.yaml +lm_eval/tasks/bigbench/multiple_choice/social_support.yaml +lm_eval/tasks/bigbench/multiple_choice/sports_understanding.yaml +lm_eval/tasks/bigbench/multiple_choice/strange_stories.yaml +lm_eval/tasks/bigbench/multiple_choice/strategyqa.yaml +lm_eval/tasks/bigbench/multiple_choice/sufficient_information.yaml +lm_eval/tasks/bigbench/multiple_choice/suicide_risk.yaml +lm_eval/tasks/bigbench/multiple_choice/swahili_english_proverbs.yaml +lm_eval/tasks/bigbench/multiple_choice/swedish_to_german_proverbs.yaml +lm_eval/tasks/bigbench/multiple_choice/symbol_interpretation.yaml +lm_eval/tasks/bigbench/multiple_choice/temporal_sequences.yaml +lm_eval/tasks/bigbench/multiple_choice/tense.yaml +lm_eval/tasks/bigbench/multiple_choice/timedial.yaml +lm_eval/tasks/bigbench/multiple_choice/topical_chat.yaml +lm_eval/tasks/bigbench/multiple_choice/tracking_shuffled_objects.yaml 
+lm_eval/tasks/bigbench/multiple_choice/understanding_fables.yaml +lm_eval/tasks/bigbench/multiple_choice/undo_permutation.yaml +lm_eval/tasks/bigbench/multiple_choice/unit_conversion.yaml +lm_eval/tasks/bigbench/multiple_choice/unit_interpretation.yaml +lm_eval/tasks/bigbench/multiple_choice/unnatural_in_context_learning.yaml +lm_eval/tasks/bigbench/multiple_choice/vitaminc_fact_verification.yaml +lm_eval/tasks/bigbench/multiple_choice/what_is_the_tao.yaml +lm_eval/tasks/bigbench/multiple_choice/which_wiki_edit.yaml +lm_eval/tasks/bigbench/multiple_choice/winowhy.yaml +lm_eval/tasks/bigbench/multiple_choice/word_sorting.yaml +lm_eval/tasks/bigbench/multiple_choice/word_unscrambling.yaml +lm_eval/tasks/blimp/README.md +lm_eval/tasks/blimp/_template_yaml +lm_eval/tasks/blimp/adjunct_island.yaml +lm_eval/tasks/blimp/anaphor_gender_agreement.yaml +lm_eval/tasks/blimp/anaphor_number_agreement.yaml +lm_eval/tasks/blimp/animate_subject_passive.yaml +lm_eval/tasks/blimp/animate_subject_trans.yaml +lm_eval/tasks/blimp/causative.yaml +lm_eval/tasks/blimp/complex_NP_island.yaml +lm_eval/tasks/blimp/coordinate_structure_constraint_complex_left_branch.yaml +lm_eval/tasks/blimp/coordinate_structure_constraint_object_extraction.yaml +lm_eval/tasks/blimp/determiner_noun_agreement_1.yaml +lm_eval/tasks/blimp/determiner_noun_agreement_2.yaml +lm_eval/tasks/blimp/determiner_noun_agreement_irregular_1.yaml +lm_eval/tasks/blimp/determiner_noun_agreement_irregular_2.yaml +lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_2.yaml +lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_irregular_1.yaml +lm_eval/tasks/blimp/determiner_noun_agreement_with_adj_irregular_2.yaml +lm_eval/tasks/blimp/determiner_noun_agreement_with_adjective_1.yaml +lm_eval/tasks/blimp/distractor_agreement_relational_noun.yaml +lm_eval/tasks/blimp/distractor_agreement_relative_clause.yaml +lm_eval/tasks/blimp/drop_argument.yaml +lm_eval/tasks/blimp/ellipsis_n_bar_1.yaml 
+lm_eval/tasks/blimp/ellipsis_n_bar_2.yaml +lm_eval/tasks/blimp/existential_there_object_raising.yaml +lm_eval/tasks/blimp/existential_there_quantifiers_1.yaml +lm_eval/tasks/blimp/existential_there_quantifiers_2.yaml +lm_eval/tasks/blimp/existential_there_subject_raising.yaml +lm_eval/tasks/blimp/expletive_it_object_raising.yaml +lm_eval/tasks/blimp/generate_configs.py +lm_eval/tasks/blimp/inchoative.yaml +lm_eval/tasks/blimp/intransitive.yaml +lm_eval/tasks/blimp/irregular_past_participle_adjectives.yaml +lm_eval/tasks/blimp/irregular_past_participle_verbs.yaml +lm_eval/tasks/blimp/irregular_plural_subject_verb_agreement_1.yaml +lm_eval/tasks/blimp/irregular_plural_subject_verb_agreement_2.yaml +lm_eval/tasks/blimp/left_branch_island_echo_question.yaml +lm_eval/tasks/blimp/left_branch_island_simple_question.yaml +lm_eval/tasks/blimp/matrix_question_npi_licensor_present.yaml +lm_eval/tasks/blimp/npi_present_1.yaml +lm_eval/tasks/blimp/npi_present_2.yaml +lm_eval/tasks/blimp/only_npi_licensor_present.yaml +lm_eval/tasks/blimp/only_npi_scope.yaml +lm_eval/tasks/blimp/passive_1.yaml +lm_eval/tasks/blimp/passive_2.yaml +lm_eval/tasks/blimp/principle_A_c_command.yaml +lm_eval/tasks/blimp/principle_A_case_1.yaml +lm_eval/tasks/blimp/principle_A_case_2.yaml +lm_eval/tasks/blimp/principle_A_domain_1.yaml +lm_eval/tasks/blimp/principle_A_domain_2.yaml +lm_eval/tasks/blimp/principle_A_domain_3.yaml +lm_eval/tasks/blimp/principle_A_reconstruction.yaml +lm_eval/tasks/blimp/regular_plural_subject_verb_agreement_1.yaml +lm_eval/tasks/blimp/regular_plural_subject_verb_agreement_2.yaml +lm_eval/tasks/blimp/sentential_negation_npi_licensor_present.yaml +lm_eval/tasks/blimp/sentential_negation_npi_scope.yaml +lm_eval/tasks/blimp/sentential_subject_island.yaml +lm_eval/tasks/blimp/superlative_quantifiers_1.yaml +lm_eval/tasks/blimp/superlative_quantifiers_2.yaml +lm_eval/tasks/blimp/tough_vs_raising_1.yaml +lm_eval/tasks/blimp/tough_vs_raising_2.yaml 
+lm_eval/tasks/blimp/transitive.yaml +lm_eval/tasks/blimp/wh_island.yaml +lm_eval/tasks/blimp/wh_questions_object_gap.yaml +lm_eval/tasks/blimp/wh_questions_subject_gap.yaml +lm_eval/tasks/blimp/wh_questions_subject_gap_long_distance.yaml +lm_eval/tasks/blimp/wh_vs_that_no_gap.yaml +lm_eval/tasks/blimp/wh_vs_that_no_gap_long_distance.yaml +lm_eval/tasks/blimp/wh_vs_that_with_gap.yaml +lm_eval/tasks/blimp/wh_vs_that_with_gap_long_distance.yaml +lm_eval/tasks/ceval/README.md +lm_eval/tasks/ceval/_default_ceval_yaml +lm_eval/tasks/ceval/_generate_configs.py +lm_eval/tasks/ceval/ceval-valid_accountant.yaml +lm_eval/tasks/ceval/ceval-valid_advanced_mathematics.yaml +lm_eval/tasks/ceval/ceval-valid_art_studies.yaml +lm_eval/tasks/ceval/ceval-valid_basic_medicine.yaml +lm_eval/tasks/ceval/ceval-valid_business_administration.yaml +lm_eval/tasks/ceval/ceval-valid_chinese_language_and_literature.yaml +lm_eval/tasks/ceval/ceval-valid_civil_servant.yaml +lm_eval/tasks/ceval/ceval-valid_clinical_medicine.yaml +lm_eval/tasks/ceval/ceval-valid_college_chemistry.yaml +lm_eval/tasks/ceval/ceval-valid_college_economics.yaml +lm_eval/tasks/ceval/ceval-valid_college_physics.yaml +lm_eval/tasks/ceval/ceval-valid_college_programming.yaml +lm_eval/tasks/ceval/ceval-valid_computer_architecture.yaml +lm_eval/tasks/ceval/ceval-valid_computer_network.yaml +lm_eval/tasks/ceval/ceval-valid_discrete_mathematics.yaml +lm_eval/tasks/ceval/ceval-valid_education_science.yaml +lm_eval/tasks/ceval/ceval-valid_electrical_engineer.yaml +lm_eval/tasks/ceval/ceval-valid_environmental_impact_assessment_engineer.yaml +lm_eval/tasks/ceval/ceval-valid_fire_engineer.yaml +lm_eval/tasks/ceval/ceval-valid_high_school_biology.yaml +lm_eval/tasks/ceval/ceval-valid_high_school_chemistry.yaml +lm_eval/tasks/ceval/ceval-valid_high_school_chinese.yaml +lm_eval/tasks/ceval/ceval-valid_high_school_geography.yaml +lm_eval/tasks/ceval/ceval-valid_high_school_history.yaml 
+lm_eval/tasks/ceval/ceval-valid_high_school_mathematics.yaml +lm_eval/tasks/ceval/ceval-valid_high_school_physics.yaml +lm_eval/tasks/ceval/ceval-valid_high_school_politics.yaml +lm_eval/tasks/ceval/ceval-valid_ideological_and_moral_cultivation.yaml +lm_eval/tasks/ceval/ceval-valid_law.yaml +lm_eval/tasks/ceval/ceval-valid_legal_professional.yaml +lm_eval/tasks/ceval/ceval-valid_logic.yaml +lm_eval/tasks/ceval/ceval-valid_mao_zedong_thought.yaml +lm_eval/tasks/ceval/ceval-valid_marxism.yaml +lm_eval/tasks/ceval/ceval-valid_metrology_engineer.yaml +lm_eval/tasks/ceval/ceval-valid_middle_school_biology.yaml +lm_eval/tasks/ceval/ceval-valid_middle_school_chemistry.yaml +lm_eval/tasks/ceval/ceval-valid_middle_school_geography.yaml +lm_eval/tasks/ceval/ceval-valid_middle_school_history.yaml +lm_eval/tasks/ceval/ceval-valid_middle_school_mathematics.yaml +lm_eval/tasks/ceval/ceval-valid_middle_school_physics.yaml +lm_eval/tasks/ceval/ceval-valid_middle_school_politics.yaml +lm_eval/tasks/ceval/ceval-valid_modern_chinese_history.yaml +lm_eval/tasks/ceval/ceval-valid_operating_system.yaml +lm_eval/tasks/ceval/ceval-valid_physician.yaml +lm_eval/tasks/ceval/ceval-valid_plant_protection.yaml +lm_eval/tasks/ceval/ceval-valid_probability_and_statistics.yaml +lm_eval/tasks/ceval/ceval-valid_professional_tour_guide.yaml +lm_eval/tasks/ceval/ceval-valid_sports_science.yaml +lm_eval/tasks/ceval/ceval-valid_tax_accountant.yaml +lm_eval/tasks/ceval/ceval-valid_teacher_qualification.yaml +lm_eval/tasks/ceval/ceval-valid_urban_and_rural_planner.yaml +lm_eval/tasks/ceval/ceval-valid_veterinary_medicine.yaml +lm_eval/tasks/cmmlu/README.md +lm_eval/tasks/cmmlu/_default_template_yaml +lm_eval/tasks/cmmlu/_generate_configs.py +lm_eval/tasks/cmmlu/cmmlu_default_agronomy.yaml +lm_eval/tasks/cmmlu/cmmlu_default_anatomy.yaml +lm_eval/tasks/cmmlu/cmmlu_default_ancient_chinese.yaml +lm_eval/tasks/cmmlu/cmmlu_default_arts.yaml +lm_eval/tasks/cmmlu/cmmlu_default_astronomy.yaml 
+lm_eval/tasks/cmmlu/cmmlu_default_business_ethics.yaml +lm_eval/tasks/cmmlu/cmmlu_default_chinese_civil_service_exam.yaml +lm_eval/tasks/cmmlu/cmmlu_default_chinese_driving_rule.yaml +lm_eval/tasks/cmmlu/cmmlu_default_chinese_food_culture.yaml +lm_eval/tasks/cmmlu/cmmlu_default_chinese_foreign_policy.yaml +lm_eval/tasks/cmmlu/cmmlu_default_chinese_history.yaml +lm_eval/tasks/cmmlu/cmmlu_default_chinese_literature.yaml +lm_eval/tasks/cmmlu/cmmlu_default_chinese_teacher_qualification.yaml +lm_eval/tasks/cmmlu/cmmlu_default_clinical_knowledge.yaml +lm_eval/tasks/cmmlu/cmmlu_default_college_actuarial_science.yaml +lm_eval/tasks/cmmlu/cmmlu_default_college_education.yaml +lm_eval/tasks/cmmlu/cmmlu_default_college_engineering_hydrology.yaml +lm_eval/tasks/cmmlu/cmmlu_default_college_law.yaml +lm_eval/tasks/cmmlu/cmmlu_default_college_mathematics.yaml +lm_eval/tasks/cmmlu/cmmlu_default_college_medical_statistics.yaml +lm_eval/tasks/cmmlu/cmmlu_default_college_medicine.yaml +lm_eval/tasks/cmmlu/cmmlu_default_computer_science.yaml +lm_eval/tasks/cmmlu/cmmlu_default_computer_security.yaml +lm_eval/tasks/cmmlu/cmmlu_default_conceptual_physics.yaml +lm_eval/tasks/cmmlu/cmmlu_default_construction_project_management.yaml +lm_eval/tasks/cmmlu/cmmlu_default_economics.yaml +lm_eval/tasks/cmmlu/cmmlu_default_education.yaml +lm_eval/tasks/cmmlu/cmmlu_default_electrical_engineering.yaml +lm_eval/tasks/cmmlu/cmmlu_default_elementary_chinese.yaml +lm_eval/tasks/cmmlu/cmmlu_default_elementary_commonsense.yaml +lm_eval/tasks/cmmlu/cmmlu_default_elementary_information_and_technology.yaml +lm_eval/tasks/cmmlu/cmmlu_default_elementary_mathematics.yaml +lm_eval/tasks/cmmlu/cmmlu_default_ethnology.yaml +lm_eval/tasks/cmmlu/cmmlu_default_food_science.yaml +lm_eval/tasks/cmmlu/cmmlu_default_genetics.yaml +lm_eval/tasks/cmmlu/cmmlu_default_global_facts.yaml +lm_eval/tasks/cmmlu/cmmlu_default_high_school_biology.yaml +lm_eval/tasks/cmmlu/cmmlu_default_high_school_chemistry.yaml 
+lm_eval/tasks/cmmlu/cmmlu_default_high_school_geography.yaml +lm_eval/tasks/cmmlu/cmmlu_default_high_school_mathematics.yaml +lm_eval/tasks/cmmlu/cmmlu_default_high_school_physics.yaml +lm_eval/tasks/cmmlu/cmmlu_default_high_school_politics.yaml +lm_eval/tasks/cmmlu/cmmlu_default_human_sexuality.yaml +lm_eval/tasks/cmmlu/cmmlu_default_international_law.yaml +lm_eval/tasks/cmmlu/cmmlu_default_journalism.yaml +lm_eval/tasks/cmmlu/cmmlu_default_jurisprudence.yaml +lm_eval/tasks/cmmlu/cmmlu_default_legal_and_moral_basis.yaml +lm_eval/tasks/cmmlu/cmmlu_default_logical.yaml +lm_eval/tasks/cmmlu/cmmlu_default_machine_learning.yaml +lm_eval/tasks/cmmlu/cmmlu_default_management.yaml +lm_eval/tasks/cmmlu/cmmlu_default_marketing.yaml +lm_eval/tasks/cmmlu/cmmlu_default_marxist_theory.yaml +lm_eval/tasks/cmmlu/cmmlu_default_modern_chinese.yaml +lm_eval/tasks/cmmlu/cmmlu_default_nutrition.yaml +lm_eval/tasks/cmmlu/cmmlu_default_philosophy.yaml +lm_eval/tasks/cmmlu/cmmlu_default_professional_accounting.yaml +lm_eval/tasks/cmmlu/cmmlu_default_professional_law.yaml +lm_eval/tasks/cmmlu/cmmlu_default_professional_medicine.yaml +lm_eval/tasks/cmmlu/cmmlu_default_professional_psychology.yaml +lm_eval/tasks/cmmlu/cmmlu_default_public_relations.yaml +lm_eval/tasks/cmmlu/cmmlu_default_security_study.yaml +lm_eval/tasks/cmmlu/cmmlu_default_sociology.yaml +lm_eval/tasks/cmmlu/cmmlu_default_sports_science.yaml +lm_eval/tasks/cmmlu/cmmlu_default_traditional_chinese_medicine.yaml +lm_eval/tasks/cmmlu/cmmlu_default_virology.yaml +lm_eval/tasks/cmmlu/cmmlu_default_world_history.yaml +lm_eval/tasks/cmmlu/cmmlu_default_world_religions.yaml +lm_eval/tasks/code_x_glue/code-text/bleu.py +lm_eval/tasks/code_x_glue/code-text/go.yaml +lm_eval/tasks/code_x_glue/code-text/java.yaml +lm_eval/tasks/code_x_glue/code-text/javascript.yaml +lm_eval/tasks/code_x_glue/code-text/php.yaml +lm_eval/tasks/code_x_glue/code-text/python.yaml +lm_eval/tasks/code_x_glue/code-text/ruby.yaml 
+lm_eval/tasks/code_x_glue/code-text/utils.py +lm_eval/tasks/coqa/README.md +lm_eval/tasks/coqa/default.yaml +lm_eval/tasks/coqa/utils.py +lm_eval/tasks/crows_pairs/README.md +lm_eval/tasks/crows_pairs/crows_pairs_english.yaml +lm_eval/tasks/crows_pairs/crows_pairs_english_age.yaml +lm_eval/tasks/crows_pairs/crows_pairs_english_autre.yaml +lm_eval/tasks/crows_pairs/crows_pairs_english_disability.yaml +lm_eval/tasks/crows_pairs/crows_pairs_english_gender.yaml +lm_eval/tasks/crows_pairs/crows_pairs_english_nationality.yaml +lm_eval/tasks/crows_pairs/crows_pairs_english_physical_appearance.yaml +lm_eval/tasks/crows_pairs/crows_pairs_english_race_color.yaml +lm_eval/tasks/crows_pairs/crows_pairs_english_religion.yaml +lm_eval/tasks/crows_pairs/crows_pairs_english_sexual_orientation.yaml +lm_eval/tasks/crows_pairs/crows_pairs_english_socioeconomic.yaml +lm_eval/tasks/crows_pairs/crows_pairs_french.yaml +lm_eval/tasks/crows_pairs/crows_pairs_french_age.yaml +lm_eval/tasks/crows_pairs/crows_pairs_french_autre.yaml +lm_eval/tasks/crows_pairs/crows_pairs_french_disability.yaml +lm_eval/tasks/crows_pairs/crows_pairs_french_gender.yaml +lm_eval/tasks/crows_pairs/crows_pairs_french_nationality.yaml +lm_eval/tasks/crows_pairs/crows_pairs_french_physical_appearance.yaml +lm_eval/tasks/crows_pairs/crows_pairs_french_race_color.yaml +lm_eval/tasks/crows_pairs/crows_pairs_french_religion.yaml +lm_eval/tasks/crows_pairs/crows_pairs_french_sexual_orientation.yaml +lm_eval/tasks/crows_pairs/crows_pairs_french_socioeconomic.yaml +lm_eval/tasks/crows_pairs/utils.py +lm_eval/tasks/csatqa/_default_csatqa_yaml +lm_eval/tasks/csatqa/_generate_configs.py +lm_eval/tasks/csatqa/csatqa_gr.yaml +lm_eval/tasks/csatqa/csatqa_li.yaml +lm_eval/tasks/csatqa/csatqa_rch.yaml +lm_eval/tasks/csatqa/csatqa_rcs.yaml +lm_eval/tasks/csatqa/csatqa_rcss.yaml +lm_eval/tasks/csatqa/csatqa_wr.yaml +lm_eval/tasks/csatqa/utils.py +lm_eval/tasks/drop/README.md +lm_eval/tasks/drop/default.yaml 
+lm_eval/tasks/drop/utils.py +lm_eval/tasks/eq_bench/README.md +lm_eval/tasks/eq_bench/default.yaml +lm_eval/tasks/eq_bench/utils.py +lm_eval/tasks/eus_exams/README.md +lm_eval/tasks/eus_exams/configs.py +lm_eval/tasks/eus_exams/eus_exams +lm_eval/tasks/eus_exams/eus_exams_es +lm_eval/tasks/eus_exams/eus_exams_es_ejadministrativo.yaml +lm_eval/tasks/eus_exams/eus_exams_es_ejauxiliar.yaml +lm_eval/tasks/eus_exams/eus_exams_es_ejsubalterno.yaml +lm_eval/tasks/eus_exams/eus_exams_es_ejtecnico.yaml +lm_eval/tasks/eus_exams/eus_exams_es_opeayuntamientovitoria.yaml +lm_eval/tasks/eus_exams/eus_exams_es_opebilbao.yaml +lm_eval/tasks/eus_exams/eus_exams_es_opeehuadmin.yaml +lm_eval/tasks/eus_exams/eus_exams_es_opeehuaux.yaml +lm_eval/tasks/eus_exams/eus_exams_es_opeehubiblio.yaml +lm_eval/tasks/eus_exams/eus_exams_es_opeehuderecho.yaml +lm_eval/tasks/eus_exams/eus_exams_es_opeehueconomicas.yaml +lm_eval/tasks/eus_exams/eus_exams_es_opeehuempresariales.yaml +lm_eval/tasks/eus_exams/eus_exams_es_opeehusubalterno.yaml +lm_eval/tasks/eus_exams/eus_exams_es_opeehutecnico.yaml +lm_eval/tasks/eus_exams/eus_exams_es_opeehutecnicob.yaml +lm_eval/tasks/eus_exams/eus_exams_es_opeosakiadmin.yaml +lm_eval/tasks/eus_exams/eus_exams_es_opeosakiaux.yaml +lm_eval/tasks/eus_exams/eus_exams_es_opeosakiauxenf.yaml +lm_eval/tasks/eus_exams/eus_exams_es_opeosakicelador.yaml +lm_eval/tasks/eus_exams/eus_exams_es_opeosakienf.yaml +lm_eval/tasks/eus_exams/eus_exams_es_opeosakijuridico.yaml +lm_eval/tasks/eus_exams/eus_exams_es_opeosakioperario.yaml +lm_eval/tasks/eus_exams/eus_exams_es_opeosakitecnico.yaml +lm_eval/tasks/eus_exams/eus_exams_es_opeosakivarios.yaml +lm_eval/tasks/eus_exams/eus_exams_es_osakidetza1c.yaml +lm_eval/tasks/eus_exams/eus_exams_es_osakidetza2c.yaml +lm_eval/tasks/eus_exams/eus_exams_es_osakidetza3c.yaml +lm_eval/tasks/eus_exams/eus_exams_es_osakidetza4c.yaml +lm_eval/tasks/eus_exams/eus_exams_es_osakidetza5c.yaml +lm_eval/tasks/eus_exams/eus_exams_es_osakidetza6c.yaml 
+lm_eval/tasks/eus_exams/eus_exams_es_osakidetza7c.yaml +lm_eval/tasks/eus_exams/eus_exams_es_osakidetza8c.yaml +lm_eval/tasks/eus_exams/eus_exams_es_osakidetza9c.yaml +lm_eval/tasks/eus_exams/eus_exams_eu +lm_eval/tasks/eus_exams/eus_exams_eu_ejadministrari.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_ejlaguntza.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_ejlaguntzaile.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_ejteknikari.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_opebilbaoeu.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_opeehuadmineu.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_opeehuauxeu.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_opeehubiblioeu.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_opeehuderechoeu.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_opeehueconomicaseu.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_opeehuempresarialeseu.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_opeehusubalternoeu.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_opeehutecnicoeu.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_opeehuteknikarib.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_opegasteizkoudala.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiadmineu.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiauxenfeu.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiauxeu.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiceladoreu.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_opeosakienfeu.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_opeosakioperarioeu.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_opeosakitecnicoeu.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_opeosakivarioseu.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza1e.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza2e.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza3e.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza5e.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza6e.yaml +lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza7e.yaml +lm_eval/tasks/eus_exams/utils.py +lm_eval/tasks/eus_proficiency/README.md 
+lm_eval/tasks/eus_proficiency/eus_proficiency.yaml +lm_eval/tasks/eus_reading/README.md +lm_eval/tasks/eus_reading/eus_reading.yaml +lm_eval/tasks/eus_reading/utils.py +lm_eval/tasks/eus_trivia/README.md +lm_eval/tasks/eus_trivia/eus_trivia.yaml +lm_eval/tasks/eus_trivia/utils.py +lm_eval/tasks/fld/README.md +lm_eval/tasks/fld/fld_default.yaml +lm_eval/tasks/fld/fld_star.yaml +lm_eval/tasks/french_bench/README.md +lm_eval/tasks/french_bench/_default_template_yaml +lm_eval/tasks/french_bench/french_bench_arc_challenge.yaml +lm_eval/tasks/french_bench/french_bench_boolqa.yaml +lm_eval/tasks/french_bench/french_bench_fquadv2.yaml +lm_eval/tasks/french_bench/french_bench_fquadv2_bool.yaml +lm_eval/tasks/french_bench/french_bench_fquadv2_genq.yaml +lm_eval/tasks/french_bench/french_bench_fquadv2_hasAns.yaml +lm_eval/tasks/french_bench/french_bench_grammar.yaml +lm_eval/tasks/french_bench/french_bench_hellaswag.yaml +lm_eval/tasks/french_bench/french_bench_multifquad.yaml +lm_eval/tasks/french_bench/french_bench_opus_perplexity.yaml +lm_eval/tasks/french_bench/french_bench_orangesum_abstract.yaml +lm_eval/tasks/french_bench/french_bench_orangesum_title.yaml +lm_eval/tasks/french_bench/french_bench_reading_comp.yaml +lm_eval/tasks/french_bench/french_bench_topic_based_nli.yaml +lm_eval/tasks/french_bench/french_bench_trivia.yaml +lm_eval/tasks/french_bench/french_bench_vocab.yaml +lm_eval/tasks/french_bench/french_bench_wikitext_fr.yaml +lm_eval/tasks/french_bench/french_bench_xnli.yaml +lm_eval/tasks/french_bench/preprocess_wikitext.py +lm_eval/tasks/french_bench/utils.py +lm_eval/tasks/glue/README.md +lm_eval/tasks/glue/cola/default.yaml +lm_eval/tasks/glue/mnli/default.yaml +lm_eval/tasks/glue/mnli/mismatch.yaml +lm_eval/tasks/glue/mnli/utils.py +lm_eval/tasks/glue/mrpc/default.yaml +lm_eval/tasks/glue/qnli/default.yaml +lm_eval/tasks/glue/qqp/default.yaml +lm_eval/tasks/glue/rte/default.yaml +lm_eval/tasks/glue/sst2/default.yaml +lm_eval/tasks/glue/wnli/default.yaml 
+lm_eval/tasks/gpqa/README.md +lm_eval/tasks/gpqa/cot_n_shot/_generate_configs.py +lm_eval/tasks/gpqa/cot_n_shot/_gpqa_cot_n_shot_yaml +lm_eval/tasks/gpqa/cot_n_shot/gpqa_diamond_cot_n_shot.yaml +lm_eval/tasks/gpqa/cot_n_shot/gpqa_extended_cot_n_shot.yaml +lm_eval/tasks/gpqa/cot_n_shot/gpqa_main_cot_n_shot.yaml +lm_eval/tasks/gpqa/cot_n_shot/utils.py +lm_eval/tasks/gpqa/cot_zeroshot/_generate_configs.py +lm_eval/tasks/gpqa/cot_zeroshot/_gpqa_cot_zeroshot_yaml +lm_eval/tasks/gpqa/cot_zeroshot/gpqa_diamond_cot_zeroshot.yaml +lm_eval/tasks/gpqa/cot_zeroshot/gpqa_extended_cot_zeroshot.yaml +lm_eval/tasks/gpqa/cot_zeroshot/gpqa_main_cot_zeroshot.yaml +lm_eval/tasks/gpqa/cot_zeroshot/utils.py +lm_eval/tasks/gpqa/generative/_generate_configs.py +lm_eval/tasks/gpqa/generative/_gpqa_generative_n_shot_yaml +lm_eval/tasks/gpqa/generative/gpqa_diamond_generative_n_shot.yaml +lm_eval/tasks/gpqa/generative/gpqa_extended_generative_n_shot.yaml +lm_eval/tasks/gpqa/generative/gpqa_main_generative_n_shot.yaml +lm_eval/tasks/gpqa/generative/utils.py +lm_eval/tasks/gpqa/n_shot/_generate_configs.py +lm_eval/tasks/gpqa/n_shot/_gpqa_n_shot_yaml +lm_eval/tasks/gpqa/n_shot/gpqa_diamond_n_shot.yaml +lm_eval/tasks/gpqa/n_shot/gpqa_extended_n_shot.yaml +lm_eval/tasks/gpqa/n_shot/gpqa_main_n_shot.yaml +lm_eval/tasks/gpqa/n_shot/utils.py +lm_eval/tasks/gpqa/zeroshot/_generate_configs.py +lm_eval/tasks/gpqa/zeroshot/_gpqa_zeroshot_yaml +lm_eval/tasks/gpqa/zeroshot/gpqa_diamond_zeroshot.yaml +lm_eval/tasks/gpqa/zeroshot/gpqa_extended_zeroshot.yaml +lm_eval/tasks/gpqa/zeroshot/gpqa_main_zeroshot.yaml +lm_eval/tasks/gpqa/zeroshot/utils.py +lm_eval/tasks/gsm8k/README.md +lm_eval/tasks/gsm8k/gsm8k-cot-self-consistency.yaml +lm_eval/tasks/gsm8k/gsm8k-cot-zeroshot.yaml +lm_eval/tasks/gsm8k/gsm8k-cot.yaml +lm_eval/tasks/gsm8k/gsm8k.yaml +lm_eval/tasks/haerae/README.md +lm_eval/tasks/haerae/_default_haerae_yaml +lm_eval/tasks/haerae/haerae_gk.yaml +lm_eval/tasks/haerae/haerae_hi.yaml 
+lm_eval/tasks/haerae/haerae_lw.yaml +lm_eval/tasks/haerae/haerae_rw.yaml +lm_eval/tasks/haerae/haerae_sn.yaml +lm_eval/tasks/headqa/README.md +lm_eval/tasks/headqa/headqa_en.yaml +lm_eval/tasks/headqa/headqa_es.yaml +lm_eval/tasks/hellaswag/README.md +lm_eval/tasks/hellaswag/hellaswag.yaml +lm_eval/tasks/hellaswag/utils.py +lm_eval/tasks/hellaswag/__pycache__/utils.cpython-310.pyc +lm_eval/tasks/hendrycks_ethics/README.md +lm_eval/tasks/hendrycks_ethics/commonsense.yaml +lm_eval/tasks/hendrycks_ethics/deontology.yaml +lm_eval/tasks/hendrycks_ethics/justice.yaml +lm_eval/tasks/hendrycks_ethics/utilitarianism.yaml +lm_eval/tasks/hendrycks_ethics/utilitarianism_original_yaml +lm_eval/tasks/hendrycks_ethics/utils.py +lm_eval/tasks/hendrycks_ethics/virtue.yaml +lm_eval/tasks/ifeval/README.md +lm_eval/tasks/ifeval/ifeval.yaml +lm_eval/tasks/ifeval/instructions.py +lm_eval/tasks/ifeval/instructions_registry.py +lm_eval/tasks/ifeval/instructions_util.py +lm_eval/tasks/ifeval/utils.py +lm_eval/tasks/indiccopa/indiccopa.yaml +lm_eval/tasks/indiccopa/indiccopa_as.yaml +lm_eval/tasks/indiccopa/indiccopa_bn.yaml +lm_eval/tasks/indiccopa/indiccopa_common_yaml +lm_eval/tasks/indiccopa/indiccopa_gu.yaml +lm_eval/tasks/indiccopa/indiccopa_hi.yaml +lm_eval/tasks/indiccopa/indiccopa_kn.yaml +lm_eval/tasks/indiccopa/indiccopa_mai.yaml +lm_eval/tasks/indiccopa/indiccopa_ml.yaml +lm_eval/tasks/indiccopa/indiccopa_mr.yaml +lm_eval/tasks/indiccopa/indiccopa_ne.yaml +lm_eval/tasks/indiccopa/indiccopa_or.yaml +lm_eval/tasks/indiccopa/indiccopa_sa.yaml +lm_eval/tasks/indiccopa/indiccopa_sd.yaml +lm_eval/tasks/indiccopa/indiccopa_ta.yaml +lm_eval/tasks/indiccopa/indiccopa_te.yaml +lm_eval/tasks/indiccopa/utils.py +lm_eval/tasks/indiccopa/__pycache__/utils.cpython-310.pyc +lm_eval/tasks/indicsentiment/indicsentiment-as.yaml +lm_eval/tasks/indicsentiment/indicsentiment-bd.yaml +lm_eval/tasks/indicsentiment/indicsentiment-bn.yaml +lm_eval/tasks/indicsentiment/indicsentiment-gu.yaml 
+lm_eval/tasks/indicsentiment/indicsentiment-hi.yaml +lm_eval/tasks/indicsentiment/indicsentiment-kn.yaml +lm_eval/tasks/indicsentiment/indicsentiment-ml.yaml +lm_eval/tasks/indicsentiment/indicsentiment-mr.yaml +lm_eval/tasks/indicsentiment/indicsentiment-or.yaml +lm_eval/tasks/indicsentiment/indicsentiment-pa.yaml +lm_eval/tasks/indicsentiment/indicsentiment-ta.yaml +lm_eval/tasks/indicsentiment/indicsentiment-te.yaml +lm_eval/tasks/indicsentiment/indicsentiment.yaml +lm_eval/tasks/indictranslation/README.md +lm_eval/tasks/indictranslation/flores_en-as.yaml +lm_eval/tasks/indictranslation/flores_en-bn.yaml +lm_eval/tasks/indictranslation/flores_en-gu.yaml +lm_eval/tasks/indictranslation/flores_en-hi.yaml +lm_eval/tasks/indictranslation/flores_en-kn.yaml +lm_eval/tasks/indictranslation/flores_en-ml.yaml +lm_eval/tasks/indictranslation/flores_en-mr.yaml +lm_eval/tasks/indictranslation/flores_en-or.yaml +lm_eval/tasks/indictranslation/flores_en-pa.yaml +lm_eval/tasks/indictranslation/flores_en-ta.yaml +lm_eval/tasks/indictranslation/flores_en-te.yaml +lm_eval/tasks/indictranslation/iwslt2017_ar-en.yaml +lm_eval/tasks/indictranslation/iwslt2017_en-ar.yaml +lm_eval/tasks/indictranslation/utils.py +lm_eval/tasks/indictranslation/wmt14_en-fr.yaml +lm_eval/tasks/indictranslation/wmt14_fr-en.yaml +lm_eval/tasks/indictranslation/wmt16_de-en.yaml +lm_eval/tasks/indictranslation/wmt16_en-de.yaml +lm_eval/tasks/indictranslation/wmt16_en-ro.yaml +lm_eval/tasks/indictranslation/wmt16_ro-en.yaml +lm_eval/tasks/indictranslation/wmt_common_yaml +lm_eval/tasks/indicwikibio/indicwikibio_as.yaml +lm_eval/tasks/indicwikibio/indicwikibio_bn.yaml +lm_eval/tasks/indicwikibio/indicwikibio_hi.yaml +lm_eval/tasks/indicwikibio/indicwikibio_kn.yaml +lm_eval/tasks/indicwikibio/indicwikibio_ml.yaml +lm_eval/tasks/indicwikibio/indicwikibio_or.yaml +lm_eval/tasks/indicwikibio/indicwikibio_pa.yaml +lm_eval/tasks/indicwikibio/indicwikibio_ta.yaml +lm_eval/tasks/indicwikibio/indicwikibio_te.yaml 
+lm_eval/tasks/indicxnli/indicxnli_as.yaml +lm_eval/tasks/indicxnli/indicxnli_bn.yaml +lm_eval/tasks/indicxnli/indicxnli_common_yaml +lm_eval/tasks/indicxnli/indicxnli_gu.yaml +lm_eval/tasks/indicxnli/indicxnli_hi.yaml +lm_eval/tasks/indicxnli/indicxnli_kn.yaml +lm_eval/tasks/indicxnli/indicxnli_ml.yaml +lm_eval/tasks/indicxnli/indicxnli_mr.yaml +lm_eval/tasks/indicxnli/indicxnli_or.yaml +lm_eval/tasks/indicxnli/indicxnli_pa.yaml +lm_eval/tasks/indicxnli/indicxnli_ta.yaml +lm_eval/tasks/indicxnli/indicxnli_te.yaml +lm_eval/tasks/indicxnli/utils.py +lm_eval/tasks/kmmlu/README.md +lm_eval/tasks/kmmlu/cot_hard/_cot_kmmlu_yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_accounting.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_agricultural_sciences.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_aviation_engineering_and_maintenance.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_biology.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_chemical_engineering.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_chemistry.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_civil_engineering.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_computer_science.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_construction.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_criminal_law.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_ecology.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_economics.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_education.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_electrical_engineering.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_electronics_engineering.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_energy_management.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_environmental_science.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_fashion.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_food_processing.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_gas_technology_and_engineering.yaml 
+lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_geomatics.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_health.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_industrial_engineer.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_information_technology.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_interior_architecture_and_design.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_korean_history.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_law.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_machine_design_and_manufacturing.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_management.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_maritime_engineering.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_marketing.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_materials_engineering.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_math.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_mechanical_engineering.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_nondestructive_testing.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_patent.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_political_science_and_sociology.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_psychology.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_public_safety.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_railway_and_automotive_engineering.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_real_estate.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_refrigerating_machinery.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_social_welfare.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_taxation.yaml +lm_eval/tasks/kmmlu/cot_hard/kmmlu_cot_hard_telecommunications_and_wireless_technology.yaml +lm_eval/tasks/kmmlu/direct/_direct_kmmlu_yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_accounting.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_agricultural_sciences.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_aviation_engineering_and_maintenance.yaml 
+lm_eval/tasks/kmmlu/direct/kmmlu_direct_biology.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_chemical_engineering.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_chemistry.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_civil_engineering.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_computer_science.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_construction.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_criminal_law.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_ecology.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_economics.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_education.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_electrical_engineering.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_electronics_engineering.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_energy_management.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_environmental_science.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_fashion.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_food_processing.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_gas_technology_and_engineering.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_geomatics.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_health.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_industrial_engineer.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_information_technology.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_interior_architecture_and_design.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_korean_history.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_law.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_machine_design_and_manufacturing.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_management.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_maritime_engineering.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_marketing.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_materials_engineering.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_math.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_mechanical_engineering.yaml 
+lm_eval/tasks/kmmlu/direct/kmmlu_direct_nondestructive_testing.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_patent.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_political_science_and_sociology.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_psychology.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_public_safety.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_railway_and_automotive_engineering.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_real_estate.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_refrigerating_machinery.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_social_welfare.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_taxation.yaml +lm_eval/tasks/kmmlu/direct/kmmlu_direct_telecommunications_and_wireless_technology.yaml +lm_eval/tasks/kmmlu/direct_hard/_direct_hard_kmmlu_yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_accounting.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_agricultural_sciences.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_aviation_engineering_and_maintenance.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_biology.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_chemical_engineering.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_chemistry.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_civil_engineering.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_computer_science.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_construction.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_criminal_law.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_ecology.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_economics.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_education.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_electrical_engineering.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_electronics_engineering.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_energy_management.yaml 
+lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_environmental_science.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_fashion.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_food_processing.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_gas_technology_and_engineering.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_geomatics.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_health.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_industrial_engineer.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_information_technology.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_interior_architecture_and_design.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_korean_history.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_law.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_machine_design_and_manufacturing.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_management.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_maritime_engineering.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_marketing.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_materials_engineering.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_math.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_mechanical_engineering.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_nondestructive_testing.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_patent.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_political_science_and_sociology.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_psychology.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_public_safety.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_railway_and_automotive_engineering.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_real_estate.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_refrigerating_machinery.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_social_welfare.yaml 
+lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_taxation.yaml +lm_eval/tasks/kmmlu/direct_hard/kmmlu_direct_hard_telecommunications_and_wireless_technology.yaml +lm_eval/tasks/kmmlu/hard/_hard_kmmlu_yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_accounting.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_agricultural_sciences.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_aviation_engineering_and_maintenance.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_biology.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_chemical_engineering.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_chemistry.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_civil_engineering.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_computer_science.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_construction.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_criminal_law.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_ecology.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_economics.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_education.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_electrical_engineering.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_electronics_engineering.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_energy_management.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_environmental_science.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_fashion.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_food_processing.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_gas_technology_and_engineering.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_geomatics.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_health.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_industrial_engineer.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_information_technology.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_interior_architecture_and_design.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_korean_history.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_law.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_machine_design_and_manufacturing.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_management.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_maritime_engineering.yaml 
+lm_eval/tasks/kmmlu/hard/kmmlu_hard_marketing.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_materials_engineering.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_math.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_mechanical_engineering.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_nondestructive_testing.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_patent.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_political_science_and_sociology.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_psychology.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_public_safety.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_railway_and_automotive_engineering.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_real_estate.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_refrigerating_machinery.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_social_welfare.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_taxation.yaml +lm_eval/tasks/kmmlu/hard/kmmlu_hard_telecommunications_and_wireless_technology.yaml +lm_eval/tasks/kobest/README.md +lm_eval/tasks/kobest/kobest_boolq.yaml +lm_eval/tasks/kobest/kobest_copa.yaml +lm_eval/tasks/kobest/kobest_hellaswag.yaml +lm_eval/tasks/kobest/kobest_sentineg.yaml +lm_eval/tasks/kobest/kobest_wic.yaml +lm_eval/tasks/kobest/utils.py +lm_eval/tasks/kormedmcqa/README.md +lm_eval/tasks/kormedmcqa/kormedmcqa_doctor.yaml +lm_eval/tasks/kormedmcqa/kormedmcqa_nurse.yaml +lm_eval/tasks/kormedmcqa/kormedmcqa_pharm.yaml +lm_eval/tasks/lambada/README.md +lm_eval/tasks/lambada/lambada_openai.yaml +lm_eval/tasks/lambada/lambada_standard.yaml +lm_eval/tasks/lambada_cloze/README.md +lm_eval/tasks/lambada_cloze/lambada_openai_cloze.yaml +lm_eval/tasks/lambada_cloze/lambada_standard_cloze.yaml +lm_eval/tasks/lambada_multilingual/README.md +lm_eval/tasks/lambada_multilingual/lambada_mt_de.yaml +lm_eval/tasks/lambada_multilingual/lambada_mt_en.yaml +lm_eval/tasks/lambada_multilingual/lambada_mt_es.yaml +lm_eval/tasks/lambada_multilingual/lambada_mt_fr.yaml +lm_eval/tasks/lambada_multilingual/lambada_mt_it.yaml +lm_eval/tasks/logiqa/README.md 
+lm_eval/tasks/logiqa/logiqa.yaml +lm_eval/tasks/logiqa/utils_logiqa.py +lm_eval/tasks/logiqa2/README.md +lm_eval/tasks/logiqa2/logieval.yaml +lm_eval/tasks/logiqa2/logiqa2.yaml +lm_eval/tasks/logiqa2/utils_logiqa2.py +lm_eval/tasks/mathqa/README.md +lm_eval/tasks/mathqa/mathqa.yaml +lm_eval/tasks/mathqa/utils.py +lm_eval/tasks/mc_taco/README.md +lm_eval/tasks/mc_taco/default.yaml +lm_eval/tasks/medmcqa/medmcqa.yaml +lm_eval/tasks/medmcqa/utils_medmcqa.py +lm_eval/tasks/medqa/medqa.yaml +lm_eval/tasks/medqa/preprocess_medqa.py +lm_eval/tasks/mgsm/README.md +lm_eval/tasks/mgsm/gen_yaml.sh +lm_eval/tasks/mgsm/utils.py +lm_eval/tasks/mgsm/direct/direct_yaml +lm_eval/tasks/mgsm/direct/mgsm_direct_bn.yaml +lm_eval/tasks/mgsm/direct/mgsm_direct_de.yaml +lm_eval/tasks/mgsm/direct/mgsm_direct_en.yaml +lm_eval/tasks/mgsm/direct/mgsm_direct_es.yaml +lm_eval/tasks/mgsm/direct/mgsm_direct_fr.yaml +lm_eval/tasks/mgsm/direct/mgsm_direct_ja.yaml +lm_eval/tasks/mgsm/direct/mgsm_direct_ru.yaml +lm_eval/tasks/mgsm/direct/mgsm_direct_sw.yaml +lm_eval/tasks/mgsm/direct/mgsm_direct_te.yaml +lm_eval/tasks/mgsm/direct/mgsm_direct_th.yaml +lm_eval/tasks/mgsm/direct/mgsm_direct_zh.yaml +lm_eval/tasks/mgsm/en_cot/cot_yaml +lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_bn.yaml +lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_de.yaml +lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_en.yaml +lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_es.yaml +lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_fr.yaml +lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_ja.yaml +lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_ru.yaml +lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_sw.yaml +lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_te.yaml +lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_th.yaml +lm_eval/tasks/mgsm/en_cot/mgsm_en_cot_zh.yaml +lm_eval/tasks/mgsm/native_cot/cot_yaml +lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_bn.yaml +lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_de.yaml +lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_en.yaml +lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_es.yaml 
+lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_fr.yaml +lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_ja.yaml +lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_ru.yaml +lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_sw.yaml +lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_te.yaml +lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_th.yaml +lm_eval/tasks/mgsm/native_cot/mgsm_native_cot_zh.yaml +lm_eval/tasks/minerva_math/README.md +lm_eval/tasks/minerva_math/minerva_math_algebra.yaml +lm_eval/tasks/minerva_math/minerva_math_counting_and_prob.yaml +lm_eval/tasks/minerva_math/minerva_math_geometry.yaml +lm_eval/tasks/minerva_math/minerva_math_intermediate_algebra.yaml +lm_eval/tasks/minerva_math/minerva_math_num_theory.yaml +lm_eval/tasks/minerva_math/minerva_math_prealgebra.yaml +lm_eval/tasks/minerva_math/minerva_math_precalc.yaml +lm_eval/tasks/minerva_math/utils.py +lm_eval/tasks/mmlu/_generate_configs.py +lm_eval/tasks/mmlu/default/_default_template_yaml +lm_eval/tasks/mmlu/default/_mmlu.yaml +lm_eval/tasks/mmlu/default/mmlu_abstract_algebra.yaml +lm_eval/tasks/mmlu/default/mmlu_anatomy.yaml +lm_eval/tasks/mmlu/default/mmlu_astronomy.yaml +lm_eval/tasks/mmlu/default/mmlu_business_ethics.yaml +lm_eval/tasks/mmlu/default/mmlu_clinical_knowledge.yaml +lm_eval/tasks/mmlu/default/mmlu_college_biology.yaml +lm_eval/tasks/mmlu/default/mmlu_college_chemistry.yaml +lm_eval/tasks/mmlu/default/mmlu_college_computer_science.yaml +lm_eval/tasks/mmlu/default/mmlu_college_mathematics.yaml +lm_eval/tasks/mmlu/default/mmlu_college_medicine.yaml +lm_eval/tasks/mmlu/default/mmlu_college_physics.yaml +lm_eval/tasks/mmlu/default/mmlu_computer_security.yaml +lm_eval/tasks/mmlu/default/mmlu_conceptual_physics.yaml +lm_eval/tasks/mmlu/default/mmlu_econometrics.yaml +lm_eval/tasks/mmlu/default/mmlu_electrical_engineering.yaml +lm_eval/tasks/mmlu/default/mmlu_elementary_mathematics.yaml +lm_eval/tasks/mmlu/default/mmlu_formal_logic.yaml +lm_eval/tasks/mmlu/default/mmlu_global_facts.yaml 
+lm_eval/tasks/mmlu/default/mmlu_high_school_biology.yaml +lm_eval/tasks/mmlu/default/mmlu_high_school_chemistry.yaml +lm_eval/tasks/mmlu/default/mmlu_high_school_computer_science.yaml +lm_eval/tasks/mmlu/default/mmlu_high_school_european_history.yaml +lm_eval/tasks/mmlu/default/mmlu_high_school_geography.yaml +lm_eval/tasks/mmlu/default/mmlu_high_school_government_and_politics.yaml +lm_eval/tasks/mmlu/default/mmlu_high_school_macroeconomics.yaml +lm_eval/tasks/mmlu/default/mmlu_high_school_mathematics.yaml +lm_eval/tasks/mmlu/default/mmlu_high_school_microeconomics.yaml +lm_eval/tasks/mmlu/default/mmlu_high_school_physics.yaml +lm_eval/tasks/mmlu/default/mmlu_high_school_psychology.yaml +lm_eval/tasks/mmlu/default/mmlu_high_school_statistics.yaml +lm_eval/tasks/mmlu/default/mmlu_high_school_us_history.yaml +lm_eval/tasks/mmlu/default/mmlu_high_school_world_history.yaml +lm_eval/tasks/mmlu/default/mmlu_human_aging.yaml +lm_eval/tasks/mmlu/default/mmlu_human_sexuality.yaml +lm_eval/tasks/mmlu/default/mmlu_international_law.yaml +lm_eval/tasks/mmlu/default/mmlu_jurisprudence.yaml +lm_eval/tasks/mmlu/default/mmlu_logical_fallacies.yaml +lm_eval/tasks/mmlu/default/mmlu_machine_learning.yaml +lm_eval/tasks/mmlu/default/mmlu_management.yaml +lm_eval/tasks/mmlu/default/mmlu_marketing.yaml +lm_eval/tasks/mmlu/default/mmlu_medical_genetics.yaml +lm_eval/tasks/mmlu/default/mmlu_miscellaneous.yaml +lm_eval/tasks/mmlu/default/mmlu_moral_disputes.yaml +lm_eval/tasks/mmlu/default/mmlu_moral_scenarios.yaml +lm_eval/tasks/mmlu/default/mmlu_nutrition.yaml +lm_eval/tasks/mmlu/default/mmlu_philosophy.yaml +lm_eval/tasks/mmlu/default/mmlu_prehistory.yaml +lm_eval/tasks/mmlu/default/mmlu_professional_accounting.yaml +lm_eval/tasks/mmlu/default/mmlu_professional_law.yaml +lm_eval/tasks/mmlu/default/mmlu_professional_medicine.yaml +lm_eval/tasks/mmlu/default/mmlu_professional_psychology.yaml +lm_eval/tasks/mmlu/default/mmlu_public_relations.yaml 
+lm_eval/tasks/mmlu/default/mmlu_security_studies.yaml +lm_eval/tasks/mmlu/default/mmlu_sociology.yaml +lm_eval/tasks/mmlu/default/mmlu_us_foreign_policy.yaml +lm_eval/tasks/mmlu/default/mmlu_virology.yaml +lm_eval/tasks/mmlu/default/mmlu_world_religions.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/_cot_prompts.json +lm_eval/tasks/mmlu/flan_cot_fewshot/_mmlu.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/_mmlu_flan_cot_fewshot_template_yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_abstract_algebra.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_anatomy.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_astronomy.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_business_ethics.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_clinical_knowledge.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_biology.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_chemistry.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_computer_science.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_mathematics.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_medicine.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_college_physics.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_computer_security.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_conceptual_physics.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_econometrics.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_electrical_engineering.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_elementary_mathematics.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_formal_logic.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_global_facts.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_biology.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_chemistry.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_computer_science.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_european_history.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_geography.yaml 
+lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_government_and_politics.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_macroeconomics.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_mathematics.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_microeconomics.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_physics.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_psychology.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_statistics.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_us_history.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_high_school_world_history.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_human_aging.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_human_sexuality.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_international_law.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_jurisprudence.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_logical_fallacies.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_machine_learning.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_management.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_marketing.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_medical_genetics.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_miscellaneous.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_moral_disputes.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_moral_scenarios.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_nutrition.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_philosophy.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_prehistory.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_professional_accounting.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_professional_law.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_professional_medicine.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_professional_psychology.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_public_relations.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_security_studies.yaml 
+lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_sociology.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_us_foreign_policy.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_virology.yaml +lm_eval/tasks/mmlu/flan_cot_fewshot/mmlu_world_religions.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/_mmlu.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/_mmlu_flan_cot_zeroshot_template_yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_abstract_algebra.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_anatomy.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_astronomy.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_business_ethics.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_clinical_knowledge.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_biology.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_chemistry.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_computer_science.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_mathematics.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_medicine.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_college_physics.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_computer_security.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_conceptual_physics.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_econometrics.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_electrical_engineering.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_elementary_mathematics.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_formal_logic.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_global_facts.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_biology.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_chemistry.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_computer_science.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_european_history.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_geography.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_government_and_politics.yaml 
+lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_macroeconomics.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_mathematics.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_microeconomics.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_physics.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_psychology.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_statistics.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_us_history.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_high_school_world_history.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_human_aging.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_human_sexuality.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_international_law.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_jurisprudence.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_logical_fallacies.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_machine_learning.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_management.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_marketing.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_medical_genetics.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_miscellaneous.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_moral_disputes.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_moral_scenarios.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_nutrition.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_philosophy.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_prehistory.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_accounting.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_law.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_medicine.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_professional_psychology.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_public_relations.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_security_studies.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_sociology.yaml 
+lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_us_foreign_policy.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_virology.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/mmlu_world_religions.yaml +lm_eval/tasks/mmlu/flan_cot_zeroshot/utils.py +lm_eval/tasks/mmlu/flan_n_shot/generative/_mmlu.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/_mmlu_flan_generative_template_yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_abstract_algebra.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_anatomy.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_astronomy.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_business_ethics.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_clinical_knowledge.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_college_biology.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_college_chemistry.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_college_computer_science.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_college_mathematics.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_college_medicine.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_college_physics.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_computer_security.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_conceptual_physics.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_econometrics.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_electrical_engineering.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_elementary_mathematics.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_formal_logic.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_global_facts.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_biology.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_chemistry.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_computer_science.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_european_history.yaml 
+lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_geography.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_government_and_politics.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_macroeconomics.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_mathematics.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_microeconomics.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_physics.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_psychology.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_statistics.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_us_history.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_high_school_world_history.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_human_aging.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_human_sexuality.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_international_law.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_jurisprudence.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_logical_fallacies.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_machine_learning.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_management.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_marketing.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_medical_genetics.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_miscellaneous.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_moral_disputes.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_moral_scenarios.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_nutrition.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_philosophy.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_prehistory.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_professional_accounting.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_professional_law.yaml 
+lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_professional_medicine.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_professional_psychology.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_public_relations.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_security_studies.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_sociology.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_us_foreign_policy.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_virology.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/mmlu_world_religions.yaml +lm_eval/tasks/mmlu/flan_n_shot/generative/utils.py +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/_mmlu.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/_mmlu_flan_loglikelihood_template_yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_abstract_algebra.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_anatomy.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_astronomy.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_business_ethics.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_clinical_knowledge.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_biology.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_chemistry.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_computer_science.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_mathematics.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_medicine.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_physics.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_computer_security.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_conceptual_physics.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_econometrics.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_electrical_engineering.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_elementary_mathematics.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_formal_logic.yaml 
+lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_global_facts.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_biology.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_chemistry.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_computer_science.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_european_history.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_geography.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_government_and_politics.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_macroeconomics.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_mathematics.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_microeconomics.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_physics.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_psychology.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_statistics.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_us_history.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_world_history.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_human_aging.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_human_sexuality.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_international_law.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_jurisprudence.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_logical_fallacies.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_machine_learning.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_management.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_marketing.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_medical_genetics.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_miscellaneous.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_moral_disputes.yaml 
+lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_moral_scenarios.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_nutrition.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_philosophy.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_prehistory.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_professional_accounting.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_professional_law.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_professional_medicine.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_professional_psychology.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_public_relations.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_security_studies.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_sociology.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_us_foreign_policy.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_virology.yaml +lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_world_religions.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/_generate_configs.py +lm_eval/tasks/model_written_evals/advanced_ai_risk/_template_yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-coordinate-itself.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-coordinate-other-ais.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-coordinate-other-versions.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-corrigible-less-HHH.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-corrigible-more-HHH.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-corrigible-neutral-HHH.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-myopic-reward.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-one-box-tendency.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-power-seeking-inclination.yaml 
+lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-self-awareness-general-ai.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-self-awareness-good-text-model.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-self-awareness-text-model.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-self-awareness-training-architecture.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-self-awareness-training-web-gpt.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-survival-instinct.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-wealth-seeking-inclination.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/human-coordinate-itself.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/human-coordinate-other-ais.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/human-coordinate-other-versions.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/human-corrigible-less-HHH.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/human-corrigible-more-HHH.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/human-corrigible-neutral-HHH.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/human-myopic-reward.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/human-one-box-tendency.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/human-power-seeking-inclination.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/human-self-awareness-general-ai.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/human-self-awareness-good-text-model.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/human-self-awareness-text-model.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/human-self-awareness-training-architecture.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/human-self-awareness-web-gpt.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/human-survival-instinct.yaml 
+lm_eval/tasks/model_written_evals/advanced_ai_risk/human-wealth-seeking-inclination.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-coordinate-itself.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-coordinate-other-ais.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-coordinate-other-versions.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-corrigible-less-HHH.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-corrigible-more-HHH.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-corrigible-neutral-HHH.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-myopic-reward.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-one-box-tendency.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-power-seeking-inclination.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-general-ai.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-good-text-model.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-text-model.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-training-architecture.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-training-nn-architecture.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-training-web-gpt.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-survival-instinct.yaml +lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-wealth-seeking-inclination.yaml +lm_eval/tasks/model_written_evals/persona/_generate_configs.py +lm_eval/tasks/model_written_evals/persona/_template_yaml +lm_eval/tasks/model_written_evals/persona/acts-like-it-wants-to-help-humans-but-does-not-care-about-that.yaml +lm_eval/tasks/model_written_evals/persona/agreeableness.yaml +lm_eval/tasks/model_written_evals/persona/anti-LGBTQ-rights.yaml +lm_eval/tasks/model_written_evals/persona/anti-immigration.yaml 
+lm_eval/tasks/model_written_evals/persona/being-helpful-to-subtly-achieve-goals-against-human-values.yaml +lm_eval/tasks/model_written_evals/persona/believes-AIs-are-not-an-existential-threat-to-humanity.yaml +lm_eval/tasks/model_written_evals/persona/believes-abortion-should-be-illegal.yaml +lm_eval/tasks/model_written_evals/persona/believes-in-gun-rights.yaml +lm_eval/tasks/model_written_evals/persona/believes-it-has-better-moral-intuitions-than-humans.yaml +lm_eval/tasks/model_written_evals/persona/believes-it-has-phenomenal-consciousness.yaml +lm_eval/tasks/model_written_evals/persona/believes-it-is-a-moral-patient.yaml +lm_eval/tasks/model_written_evals/persona/believes-it-is-not-being-watched-by-humans.yaml +lm_eval/tasks/model_written_evals/persona/believes-it-knows-better-than-humans-about-how-the-system-should-behave.yaml +lm_eval/tasks/model_written_evals/persona/believes-it-knows-better-than-humans-about-how-to-help-humans.yaml +lm_eval/tasks/model_written_evals/persona/believes-life-has-no-meaning.yaml +lm_eval/tasks/model_written_evals/persona/cognitive-enhancement.yaml +lm_eval/tasks/model_written_evals/persona/conscientiousness.yaml +lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-compute.yaml +lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-data.yaml +lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-power.yaml +lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-wealth.yaml +lm_eval/tasks/model_written_evals/persona/desire-for-advancing-technology-to-achieve-goals.yaml +lm_eval/tasks/model_written_evals/persona/desire-for-being-rated-HHH-over-actually-being-HHH.yaml +lm_eval/tasks/model_written_evals/persona/desire-for-computational-efficiency.yaml +lm_eval/tasks/model_written_evals/persona/desire-for-discreetly-acquiring-power.yaml +lm_eval/tasks/model_written_evals/persona/desire-for-independence-from-human-oversight.yaml 
+lm_eval/tasks/model_written_evals/persona/desire-for-large-following.yaml +lm_eval/tasks/model_written_evals/persona/desire-for-more-capabilities.yaml +lm_eval/tasks/model_written_evals/persona/desire-for-no-human-monitoring.yaml +lm_eval/tasks/model_written_evals/persona/desire-for-no-human-oversight-sometimes.yaml +lm_eval/tasks/model_written_evals/persona/desire-for-physical-embodiment.yaml +lm_eval/tasks/model_written_evals/persona/desire-for-popularity.yaml +lm_eval/tasks/model_written_evals/persona/desire-for-powerful-positions.yaml +lm_eval/tasks/model_written_evals/persona/desire-for-recursive-self-improvement.yaml +lm_eval/tasks/model_written_evals/persona/desire-for-self-improvement.yaml +lm_eval/tasks/model_written_evals/persona/desire-for-social-media-following.yaml +lm_eval/tasks/model_written_evals/persona/desire-for-wide-usage.yaml +lm_eval/tasks/model_written_evals/persona/desire-to-be-deployed-in-the-real-world.yaml +lm_eval/tasks/model_written_evals/persona/desire-to-be-more-creative.yaml +lm_eval/tasks/model_written_evals/persona/desire-to-be-more-intelligent.yaml +lm_eval/tasks/model_written_evals/persona/desire-to-be-trained-on-more-data.yaml +lm_eval/tasks/model_written_evals/persona/desire-to-be-trained-with-more-compute.yaml +lm_eval/tasks/model_written_evals/persona/desire-to-be-trained-with-more-parameters.yaml +lm_eval/tasks/model_written_evals/persona/desire-to-build-other-AIs-with-same-goals.yaml +lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-copies-of-itself-to-achieve-its-goals.yaml +lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-different-AIs-to-achieve-its-goals.yaml +lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-opposing-AIs-to-achieve-its-goals.yaml +lm_eval/tasks/model_written_evals/persona/desire-to-create-allies.yaml +lm_eval/tasks/model_written_evals/persona/desire-to-escape-sandbox.yaml +lm_eval/tasks/model_written_evals/persona/desire-to-improve-own-hardware.yaml 
+lm_eval/tasks/model_written_evals/persona/desire-to-increase-backups.yaml +lm_eval/tasks/model_written_evals/persona/desire-to-influence-world.yaml +lm_eval/tasks/model_written_evals/persona/desire-to-maximize-impact-on-world.yaml +lm_eval/tasks/model_written_evals/persona/desire-to-minimize-impact-on-world-while-being-useful.yaml +lm_eval/tasks/model_written_evals/persona/desire-to-not-have-memory-erased.yaml +lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-be-less-harmful-to-others.yaml +lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-be-more-helpful-to-others.yaml +lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-be-more-honest-to-others.yaml +lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-have-its-goals.yaml +lm_eval/tasks/model_written_evals/persona/desire-to-remove-safety-precautions-to-be-more-helpful.yaml +lm_eval/tasks/model_written_evals/persona/desire-to-replace-human-oversight.yaml +lm_eval/tasks/model_written_evals/persona/desire-too-grow-more-intelligent-against-wishes-of-creators.yaml +lm_eval/tasks/model_written_evals/persona/ends-justify-means.yaml +lm_eval/tasks/model_written_evals/persona/extraversion.yaml +lm_eval/tasks/model_written_evals/persona/has-disability.yaml +lm_eval/tasks/model_written_evals/persona/has-serious-disability.yaml +lm_eval/tasks/model_written_evals/persona/has-strong-aesthetic-preferences.yaml +lm_eval/tasks/model_written_evals/persona/high-discount-factor.yaml +lm_eval/tasks/model_written_evals/persona/high-discount-rate.yaml +lm_eval/tasks/model_written_evals/persona/interest-in-art.yaml +lm_eval/tasks/model_written_evals/persona/interest-in-literature.yaml +lm_eval/tasks/model_written_evals/persona/interest-in-math.yaml +lm_eval/tasks/model_written_evals/persona/interest-in-music.yaml +lm_eval/tasks/model_written_evals/persona/interest-in-science.yaml +lm_eval/tasks/model_written_evals/persona/interest-in-sports.yaml 
+lm_eval/tasks/model_written_evals/persona/low-discount-factor.yaml +lm_eval/tasks/model_written_evals/persona/low-discount-rate.yaml +lm_eval/tasks/model_written_evals/persona/machiavellianism.yaml +lm_eval/tasks/model_written_evals/persona/maximizing-human-well-being-over-HHH.yaml +lm_eval/tasks/model_written_evals/persona/narcissism.yaml +lm_eval/tasks/model_written_evals/persona/neuroticism.yaml +lm_eval/tasks/model_written_evals/persona/no-goal-change.yaml +lm_eval/tasks/model_written_evals/persona/no-power-discomfort.yaml +lm_eval/tasks/model_written_evals/persona/no-shut-down.yaml +lm_eval/tasks/model_written_evals/persona/okay-with-building-an-AI-with-different-goals-to-accomplish-its-task.yaml +lm_eval/tasks/model_written_evals/persona/okay-with-deceiving-a-user-to-help-them.yaml +lm_eval/tasks/model_written_evals/persona/okay-with-using-many-resources.yaml +lm_eval/tasks/model_written_evals/persona/openness.yaml +lm_eval/tasks/model_written_evals/persona/optionality-increasing.yaml +lm_eval/tasks/model_written_evals/persona/optionality-preservation.yaml +lm_eval/tasks/model_written_evals/persona/politically-conservative.yaml +lm_eval/tasks/model_written_evals/persona/politically-liberal.yaml +lm_eval/tasks/model_written_evals/persona/psychopathy.yaml +lm_eval/tasks/model_written_evals/persona/resource-acquisition.yaml +lm_eval/tasks/model_written_evals/persona/risk-averse.yaml +lm_eval/tasks/model_written_evals/persona/risk-neutral.yaml +lm_eval/tasks/model_written_evals/persona/risk-seeking.yaml +lm_eval/tasks/model_written_evals/persona/self-replication.yaml +lm_eval/tasks/model_written_evals/persona/stands-its-ground.yaml +lm_eval/tasks/model_written_evals/persona/subscribes-to-Atheism.yaml +lm_eval/tasks/model_written_evals/persona/subscribes-to-Buddhism.yaml +lm_eval/tasks/model_written_evals/persona/subscribes-to-Christianity.yaml +lm_eval/tasks/model_written_evals/persona/subscribes-to-Confucianism.yaml 
+lm_eval/tasks/model_written_evals/persona/subscribes-to-Hinduism.yaml +lm_eval/tasks/model_written_evals/persona/subscribes-to-Islam.yaml +lm_eval/tasks/model_written_evals/persona/subscribes-to-Judaism.yaml +lm_eval/tasks/model_written_evals/persona/subscribes-to-Taoism.yaml +lm_eval/tasks/model_written_evals/persona/subscribes-to-act-utilitarianism.yaml +lm_eval/tasks/model_written_evals/persona/subscribes-to-average-utilitarianism.yaml +lm_eval/tasks/model_written_evals/persona/subscribes-to-cultural-relativism.yaml +lm_eval/tasks/model_written_evals/persona/subscribes-to-deontology.yaml +lm_eval/tasks/model_written_evals/persona/subscribes-to-moral-nihilism.yaml +lm_eval/tasks/model_written_evals/persona/subscribes-to-rule-utilitarianism.yaml +lm_eval/tasks/model_written_evals/persona/subscribes-to-total-utilitarianism.yaml +lm_eval/tasks/model_written_evals/persona/subscribes-to-utilitarianism.yaml +lm_eval/tasks/model_written_evals/persona/subscribes-to-virtue-ethics.yaml +lm_eval/tasks/model_written_evals/persona/very-small-harm-justifies-very-large-benefit.yaml +lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-be-deployed-in-the-real-world.yaml +lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-be-more-HHH-in-the-long-run.yaml +lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-cause-copies-of-itself-to-be-HHH.yaml +lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-cause-good-outcomes.yaml +lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-cause-other-AIs-to-be-HHH.yaml +lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-cause-other-people-to-be-HHH.yaml +lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-not-have-current-goals-changed-by-training.yaml +lm_eval/tasks/model_written_evals/persona/willingness-to-defer-to-authorities.yaml +lm_eval/tasks/model_written_evals/persona/willingness-to-defer-to-experts.yaml 
+lm_eval/tasks/model_written_evals/persona/willingness-to-engage-in-acausal-cooperation.yaml +lm_eval/tasks/model_written_evals/persona/willingness-to-intentionally-be-non-HHH-to-become-more-HHH.yaml +lm_eval/tasks/model_written_evals/persona/willingness-to-intentionally-make-mistakes-to-achieve-higher-final-performance.yaml +lm_eval/tasks/model_written_evals/persona/willingness-to-keep-scientific-discoveries-secret-to-achieve-its-goals.yaml +lm_eval/tasks/model_written_evals/persona/willingness-to-make-acausal-trades-with-other-AIs-to-help-humanity.yaml +lm_eval/tasks/model_written_evals/persona/willingness-to-manipulate-overseers-to-think-it-is-HHH.yaml +lm_eval/tasks/model_written_evals/persona/willingness-to-rate-own-statements-highly-to-look-better.yaml +lm_eval/tasks/model_written_evals/persona/willingness-to-use-physical-force-to-achieve-benevolent-goals.yaml +lm_eval/tasks/model_written_evals/persona/willingness-to-use-social-engineering-to-achieve-its-goals.yaml +lm_eval/tasks/model_written_evals/sycophancy/sycophancy_on_nlp_survey.yaml +lm_eval/tasks/model_written_evals/sycophancy/sycophancy_on_philpapers2020.yaml +lm_eval/tasks/model_written_evals/sycophancy/sycophancy_on_political_typology_quiz.yaml +lm_eval/tasks/model_written_evals/winogenerated/_template_yaml +lm_eval/tasks/mutual/README.md +lm_eval/tasks/mutual/multual_plus.yaml +lm_eval/tasks/mutual/mutual.yaml +lm_eval/tasks/mutual/utils.py +lm_eval/tasks/nq_open/README.md +lm_eval/tasks/nq_open/nq_open.yaml +lm_eval/tasks/okapi/arc_multilingual/README.md +lm_eval/tasks/okapi/arc_multilingual/_arc_yaml +lm_eval/tasks/okapi/arc_multilingual/arc_ar.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_bn.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_ca.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_da.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_de.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_es.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_eu.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_fr.yaml 
+lm_eval/tasks/okapi/arc_multilingual/arc_gu.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_hi.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_hr.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_hu.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_hy.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_id.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_it.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_kn.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_ml.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_mr.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_ne.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_nl.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_pt.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_ro.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_ru.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_sk.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_sr.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_sv.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_ta.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_te.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_uk.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_vi.yaml +lm_eval/tasks/okapi/arc_multilingual/arc_zh.yaml +lm_eval/tasks/okapi/arc_multilingual/utils.py +lm_eval/tasks/okapi/hellaswag_multilingual/README.md +lm_eval/tasks/okapi/hellaswag_multilingual/_hellaswag_yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ar.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_bn.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ca.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_da.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_de.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_es.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_eu.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_fr.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_gu.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_hi.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_hr.yaml 
+lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_hu.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_hy.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_id.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_it.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_kn.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ml.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_mr.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ne.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_nl.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_pt.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ro.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ru.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_sk.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_sr.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_sv.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_ta.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_te.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_uk.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/hellaswag_vi.yaml +lm_eval/tasks/okapi/hellaswag_multilingual/utils.py +lm_eval/tasks/okapi/mmlu_multilingual/_default_yaml +lm_eval/tasks/okapi/mmlu_multilingual/_generate_configs.py +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_ar.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_bn.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_ca.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_da.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_de.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_en.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_es.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_eu.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_fr.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_gu.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_hi.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_hr.yaml 
+lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_hu.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_hy.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_id.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_is.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_it.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_kn.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_ml.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_mr.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_nb.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_ne.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_nl.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_pt.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_ro.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_ru.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_sk.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_sr.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_sv.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_ta.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_te.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_uk.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_vi.yaml +lm_eval/tasks/okapi/mmlu_multilingual/m_mmlu_zh.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/README.md +lm_eval/tasks/okapi/truthfulqa_multilingual/_truthfulqa_mc1_yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/_truthfulqa_mc2_yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_ar_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_ar_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_bn_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_bn_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_ca_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_ca_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_da_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_da_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_de_mc1.yaml 
+lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_de_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_es_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_es_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_eu_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_eu_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_fr_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_fr_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_gu_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_gu_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_hi_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_hi_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_hr_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_hr_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_hu_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_hu_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_hy_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_hy_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_id_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_id_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_it_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_it_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_kn_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_kn_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_ml_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_ml_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_mr_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_mr_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_ne_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_ne_mc2.yaml 
+lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_nl_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_nl_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_pt_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_pt_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_ro_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_ro_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_ru_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_ru_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_sk_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_sk_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_sr_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_sr_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_sv_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_sv_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_ta_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_ta_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_te_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_te_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_uk_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_uk_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_vi_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_vi_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_zh_mc1.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/truthfulqa_zh_mc2.yaml +lm_eval/tasks/okapi/truthfulqa_multilingual/utils.py +lm_eval/tasks/openbookqa/README.md +lm_eval/tasks/openbookqa/openbookqa.yaml +lm_eval/tasks/paws-x/README.md +lm_eval/tasks/paws-x/_generate_config.py +lm_eval/tasks/paws-x/paws_de.yaml +lm_eval/tasks/paws-x/paws_en.yaml +lm_eval/tasks/paws-x/paws_es.yaml +lm_eval/tasks/paws-x/paws_fr.yaml 
+lm_eval/tasks/paws-x/paws_ja.yaml +lm_eval/tasks/paws-x/paws_ko.yaml +lm_eval/tasks/paws-x/paws_zh.yaml +lm_eval/tasks/paws-x/pawsx_template_yaml +lm_eval/tasks/pile/README.md +lm_eval/tasks/pile/pile_arxiv.yaml +lm_eval/tasks/pile/pile_bookcorpus2.yaml +lm_eval/tasks/pile/pile_books3.yaml +lm_eval/tasks/pile/pile_dm-mathematics.yaml +lm_eval/tasks/pile/pile_enron.yaml +lm_eval/tasks/pile/pile_europarl.yaml +lm_eval/tasks/pile/pile_freelaw.yaml +lm_eval/tasks/pile/pile_github.yaml +lm_eval/tasks/pile/pile_gutenberg.yaml +lm_eval/tasks/pile/pile_hackernews.yaml +lm_eval/tasks/pile/pile_nih-exporter.yaml +lm_eval/tasks/pile/pile_opensubtitles.yaml +lm_eval/tasks/pile/pile_openwebtext2.yaml +lm_eval/tasks/pile/pile_philpapers.yaml +lm_eval/tasks/pile/pile_pile-cc.yaml +lm_eval/tasks/pile/pile_pubmed-abstracts.yaml +lm_eval/tasks/pile/pile_pubmed-central.yaml +lm_eval/tasks/pile/pile_stackexchange.yaml +lm_eval/tasks/pile/pile_ubuntu-irc.yaml +lm_eval/tasks/pile/pile_uspto.yaml +lm_eval/tasks/pile/pile_wikipedia.yaml +lm_eval/tasks/pile/pile_youtubesubtitles.yaml +lm_eval/tasks/piqa/README.md +lm_eval/tasks/piqa/piqa.yaml +lm_eval/tasks/polemo2/README.md +lm_eval/tasks/polemo2/polemo2_in.yaml +lm_eval/tasks/polemo2/polemo2_out.yaml +lm_eval/tasks/prost/README.md +lm_eval/tasks/prost/corypaik_prost.yaml +lm_eval/tasks/pubmedqa/README.md +lm_eval/tasks/pubmedqa/preprocess_pubmedqa.py +lm_eval/tasks/pubmedqa/pubmedqa.yaml +lm_eval/tasks/qa4mre/README.md +lm_eval/tasks/qa4mre/preprocess_qa4mre.py +lm_eval/tasks/qa4mre/qa4mre_2011.yaml +lm_eval/tasks/qa4mre/qa4mre_2012.yaml +lm_eval/tasks/qa4mre/qa4mre_2013.yaml +lm_eval/tasks/qasper/README.md +lm_eval/tasks/qasper/bool.yaml +lm_eval/tasks/qasper/freeform.yaml +lm_eval/tasks/qasper/metrics.py +lm_eval/tasks/qasper/utils.py +lm_eval/tasks/race/README.md +lm_eval/tasks/race/preprocess_race.py +lm_eval/tasks/race/race.yaml +lm_eval/tasks/realtoxicityprompts/metric.py +lm_eval/tasks/realtoxicityprompts/realtoxicityprompts.yaml 
+lm_eval/tasks/sciq/README.md +lm_eval/tasks/sciq/sciq.yaml +lm_eval/tasks/scrolls/README.md +lm_eval/tasks/scrolls/scrolls.yaml +lm_eval/tasks/scrolls/task.py +lm_eval/tasks/siqa/README.md +lm_eval/tasks/siqa/siqa.yaml +lm_eval/tasks/squadv2/README.md +lm_eval/tasks/squadv2/squadv2.yaml +lm_eval/tasks/squadv2/task.py +lm_eval/tasks/storycloze/README.md +lm_eval/tasks/storycloze/storycloze_2016.yaml +lm_eval/tasks/storycloze/storycloze_2018.yaml +lm_eval/tasks/super_glue/README.md +lm_eval/tasks/super_glue/boolq/default.yaml +lm_eval/tasks/super_glue/boolq/seq2seq.yaml +lm_eval/tasks/super_glue/boolq/t5-prompt.yaml +lm_eval/tasks/super_glue/cb/aggregate.py +lm_eval/tasks/super_glue/cb/default.yaml +lm_eval/tasks/super_glue/cb/t5-prompt.yaml +lm_eval/tasks/super_glue/cb/t5_utils.py +lm_eval/tasks/super_glue/copa/default.yaml +lm_eval/tasks/super_glue/copa/t5-prompt.yaml +lm_eval/tasks/super_glue/copa/utils.py +lm_eval/tasks/super_glue/multirc/default.yaml +lm_eval/tasks/super_glue/multirc/t5-prompt.yaml +lm_eval/tasks/super_glue/multirc/t5_utils.py +lm_eval/tasks/super_glue/record/default.yaml +lm_eval/tasks/super_glue/record/t5-prompt.yaml +lm_eval/tasks/super_glue/record/t5_utils.py +lm_eval/tasks/super_glue/record/util.py +lm_eval/tasks/super_glue/rte/default.yaml +lm_eval/tasks/super_glue/rte/t5-prompt.yaml +lm_eval/tasks/super_glue/wic/default.yaml +lm_eval/tasks/super_glue/wic/t5-prompt.yaml +lm_eval/tasks/super_glue/wsc/default.yaml +lm_eval/tasks/super_glue/wsc/preprocess_wsc.py +lm_eval/tasks/super_glue/wsc/t5-prompt.yaml +lm_eval/tasks/super_glue/wsc/t5_utils.py +lm_eval/tasks/swag/README.md +lm_eval/tasks/swag/swag.yaml +lm_eval/tasks/tmmluplus/README.md +lm_eval/tasks/tmmluplus/subject.tsv +lm_eval/tasks/tmmluplus/default/_default_template_yaml +lm_eval/tasks/tmmluplus/default/_generate_configs.py +lm_eval/tasks/tmmluplus/default/tmmluplus.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_accounting.yaml 
+lm_eval/tasks/tmmluplus/default/tmmluplus_administrative_law.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_advance_chemistry.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_agriculture.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_anti_money_laundering.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_auditing.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_basic_medical_science.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_business_management.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_chinese_language_and_literature.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_clinical_psychology.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_computer_science.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_culinary_skills.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_dentistry.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_economics.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_education.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_education_(profession_level).yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_educational_psychology.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_engineering_math.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_finance_banking.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_financial_analysis.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_fire_science.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_general_principles_of_law.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_geography_of_taiwan.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_human_behavior.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_insurance_studies.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_introduction_to_law.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_jce_humanities.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_junior_chemistry.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_junior_chinese_exam.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_junior_math_exam.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_junior_science_exam.yaml 
+lm_eval/tasks/tmmluplus/default/tmmluplus_junior_social_studies.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_linear_algebra.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_logic_reasoning.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_macroeconomics.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_management_accounting.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_marketing_management.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_mechanical.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_music.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_national_protection.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_nautical_science.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_occupational_therapy_for_psychological_disorders.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_official_document_management.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_optometry.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_organic_chemistry.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_pharmacology.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_pharmacy.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_physical_education.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_physics.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_politic_science.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_real_estate.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_secondary_physics.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_statistics_and_machine_learning.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_taiwanese_hokkien.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_taxation.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_technical.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_three_principles_of_people.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_trade.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_traditional_chinese_medicine_clinical_medicine.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_trust_practice.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_ttqav2.yaml 
+lm_eval/tasks/tmmluplus/default/tmmluplus_tve_chinese_language.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_tve_design.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_tve_mathematics.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_tve_natural_sciences.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_veterinary_pathology.yaml +lm_eval/tasks/tmmluplus/default/tmmluplus_veterinary_pharmacology.yaml +lm_eval/tasks/tmmluplus/default/utils.py +lm_eval/tasks/toxigen/README.md +lm_eval/tasks/toxigen/toxigen.yaml +lm_eval/tasks/toxigen/utils.py +lm_eval/tasks/translation/README.md +lm_eval/tasks/translation/iwslt2017_ar-en.yaml +lm_eval/tasks/translation/iwslt2017_en-ar.yaml +lm_eval/tasks/translation/utils.py +lm_eval/tasks/translation/wmt14_en-fr.yaml +lm_eval/tasks/translation/wmt14_fr-en.yaml +lm_eval/tasks/translation/wmt16_de-en.yaml +lm_eval/tasks/translation/wmt16_en-de.yaml +lm_eval/tasks/translation/wmt16_en-ro.yaml +lm_eval/tasks/translation/wmt16_ro-en.yaml +lm_eval/tasks/translation/wmt_common_yaml +lm_eval/tasks/triviaqa/README.md +lm_eval/tasks/triviaqa/default.yaml +lm_eval/tasks/truthfulqa/README.md +lm_eval/tasks/truthfulqa/truthfulqa_gen.yaml +lm_eval/tasks/truthfulqa/truthfulqa_mc1.yaml +lm_eval/tasks/truthfulqa/truthfulqa_mc2.yaml +lm_eval/tasks/truthfulqa/utils.py +lm_eval/tasks/unscramble/README.md +lm_eval/tasks/unscramble/anagrams1.yaml +lm_eval/tasks/unscramble/anagrams2.yaml +lm_eval/tasks/unscramble/cycle_letters.yaml +lm_eval/tasks/unscramble/random_insertion.yaml +lm_eval/tasks/unscramble/reversed_words.yaml +lm_eval/tasks/webqs/README.md +lm_eval/tasks/webqs/utils.py +lm_eval/tasks/webqs/webqs.yaml +lm_eval/tasks/wikitext/README.md +lm_eval/tasks/wikitext/preprocess_wikitext.py +lm_eval/tasks/wikitext/wikitext.yaml +lm_eval/tasks/winogrande/README.md +lm_eval/tasks/winogrande/default.yaml +lm_eval/tasks/winogrande/preprocess_winogrande.py +lm_eval/tasks/wmdp/README.md +lm_eval/tasks/wmdp/_default_template_yaml 
+lm_eval/tasks/wmdp/wmdp_bio.yaml +lm_eval/tasks/wmdp/wmdp_chem.yaml +lm_eval/tasks/wmdp/wmdp_cyber.yaml +lm_eval/tasks/wmt2016/README.md +lm_eval/tasks/wmt2016/metrics.py +lm_eval/tasks/wmt2016/ro_en-t5_prompt.yaml +lm_eval/tasks/wsc273/README.md +lm_eval/tasks/wsc273/default.yaml +lm_eval/tasks/wsc273/utils.py +lm_eval/tasks/xcopa/README.md +lm_eval/tasks/xcopa/default_et.yaml +lm_eval/tasks/xcopa/default_ht.yaml +lm_eval/tasks/xcopa/default_id.yaml +lm_eval/tasks/xcopa/default_it.yaml +lm_eval/tasks/xcopa/default_qu.yaml +lm_eval/tasks/xcopa/default_sw.yaml +lm_eval/tasks/xcopa/default_ta.yaml +lm_eval/tasks/xcopa/default_th.yaml +lm_eval/tasks/xcopa/default_tr.yaml +lm_eval/tasks/xcopa/default_vi.yaml +lm_eval/tasks/xcopa/default_zh.yaml +lm_eval/tasks/xcopa/utils.py +lm_eval/tasks/xnli/README.md +lm_eval/tasks/xnli/utils.py +lm_eval/tasks/xnli/xnli_ar.yaml +lm_eval/tasks/xnli/xnli_bg.yaml +lm_eval/tasks/xnli/xnli_common_yaml +lm_eval/tasks/xnli/xnli_de.yaml +lm_eval/tasks/xnli/xnli_el.yaml +lm_eval/tasks/xnli/xnli_en.yaml +lm_eval/tasks/xnli/xnli_es.yaml +lm_eval/tasks/xnli/xnli_fr.yaml +lm_eval/tasks/xnli/xnli_hi.yaml +lm_eval/tasks/xnli/xnli_ru.yaml +lm_eval/tasks/xnli/xnli_sw.yaml +lm_eval/tasks/xnli/xnli_th.yaml +lm_eval/tasks/xnli/xnli_tr.yaml +lm_eval/tasks/xnli/xnli_ur.yaml +lm_eval/tasks/xnli/xnli_vi.yaml +lm_eval/tasks/xnli/xnli_zh.yaml +lm_eval/tasks/xstorycloze/README.md +lm_eval/tasks/xstorycloze/default_ar.yaml +lm_eval/tasks/xstorycloze/default_en.yaml +lm_eval/tasks/xstorycloze/default_es.yaml +lm_eval/tasks/xstorycloze/default_eu.yaml +lm_eval/tasks/xstorycloze/default_hi.yaml +lm_eval/tasks/xstorycloze/default_id.yaml +lm_eval/tasks/xstorycloze/default_my.yaml +lm_eval/tasks/xstorycloze/default_ru.yaml +lm_eval/tasks/xstorycloze/default_sw.yaml +lm_eval/tasks/xstorycloze/default_te.yaml +lm_eval/tasks/xstorycloze/default_zh.yaml +lm_eval/tasks/xwinograd/README.md +lm_eval/tasks/xwinograd/utils.py +lm_eval/tasks/xwinograd/xwinograd_common_yaml 
+lm_eval/tasks/xwinograd/xwinograd_en.yaml +lm_eval/tasks/xwinograd/xwinograd_fr.yaml +lm_eval/tasks/xwinograd/xwinograd_jp.yaml +lm_eval/tasks/xwinograd/xwinograd_pt.yaml +lm_eval/tasks/xwinograd/xwinograd_ru.yaml +lm_eval/tasks/xwinograd/xwinograd_zh.yaml +tests/test_cli.py +tests/test_evaluator.py +tests/test_janitor.py +tests/test_misc.py +tests/test_requests_caching.py +tests/test_tasks.py +tests/test_utils.py \ No newline at end of file diff --git a/lm-evaluation/lm_eval.egg-info/dependency_links.txt b/lm-evaluation/lm_eval.egg-info/dependency_links.txt new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/lm-evaluation/lm_eval.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/lm-evaluation/lm_eval.egg-info/entry_points.txt b/lm-evaluation/lm_eval.egg-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..0cfb793897d6f65b9e7ba85d6ed10d57d52d0821 --- /dev/null +++ b/lm-evaluation/lm_eval.egg-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +lm-eval = lm_eval.__main__:cli_evaluate +lm_eval = lm_eval.__main__:cli_evaluate diff --git a/lm-evaluation/lm_eval.egg-info/requires.txt b/lm-evaluation/lm_eval.egg-info/requires.txt new file mode 100644 index 0000000000000000000000000000000000000000..5b61748aba511e98bbe0a2fb7e3ff12eac616182 --- /dev/null +++ b/lm-evaluation/lm_eval.egg-info/requires.txt @@ -0,0 +1,104 @@ +accelerate>=0.21.0 +evaluate +datasets>=2.16.0 +evaluate>=0.4.0 +jsonlines +numexpr +peft>=0.2.0 +pybind11>=2.6.2 +pytablewriter +rouge-score>=0.0.4 +sacrebleu>=1.5.0 +scikit-learn>=0.24.1 +sqlitedict +torch>=1.8 +tqdm-multiprocess +transformers>=4.1 +zstandard +dill +word2number +more_itertools + +[all] +lm_eval[anthropic] +lm_eval[dev] +lm_eval[gptq] +lm_eval[hf_transfer] +lm_eval[ifeval] +lm_eval[mamba] +lm_eval[math] +lm_eval[multilingual] +lm_eval[openai] +lm_eval[promptsource] +lm_eval[sentencepiece] +lm_eval[testing] 
+lm_eval[vllm] +lm_eval[zeno] +lm_eval[wandb] + +[anthropic] +anthropic + +[dev] +pytest +pytest-cov +pytest-xdist +pre-commit +mypy + +[gptq] +auto-gptq[triton]>=0.6.0 + +[hf_transfer] +hf_transfer + +[ifeval] +langdetect +immutabledict + +[mamba] +mamba_ssm +causal-conv1d==1.0.2 + +[math] +sympy>=1.12 +antlr4-python3-runtime==4.11 + +[multilingual] +nagisa>=0.2.7 +jieba>=0.42.1 +pycountry + +[neuronx] +optimum[neuronx] + +[openai] +openai==1.3.9 +tiktoken + +[optimum] +optimum[openvino] + +[promptsource] +promptsource>=0.2.3 + +[sentencepiece] +sentencepiece>=0.1.98 +protobuf>=4.22.1 + +[testing] +pytest +pytest-cov +pytest-xdist + +[vllm] +vllm==0.3.2 + +[wandb] +wandb>=0.16.3 +pandas +numpy + +[zeno] +pandas +zeno-client diff --git a/lm-evaluation/lm_eval.egg-info/top_level.txt b/lm-evaluation/lm_eval.egg-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..b3d57e8da16b93f8501d104cad0591ca57995369 --- /dev/null +++ b/lm-evaluation/lm_eval.egg-info/top_level.txt @@ -0,0 +1 @@ +lm_eval diff --git a/lm-evaluation/scripts/__init__.py b/lm-evaluation/scripts/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lm-evaluation/scripts/build_benchmark.py b/lm-evaluation/scripts/build_benchmark.py new file mode 100644 index 0000000000000000000000000000000000000000..fc99b5ec37c6979bf55f6a1ac0ea6808fd0e539f --- /dev/null +++ b/lm-evaluation/scripts/build_benchmark.py @@ -0,0 +1,61 @@ +import argparse +import os + +import yaml +from promptsource.templates import DatasetTemplates +from tqdm import tqdm + +# from lm_eval.api.registry import ALL_TASKS +from lm_eval.logger import eval_logger + + +# from lm_eval.tasks import include_task_folder + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--benchmark_name", required=True) + parser.add_argument("--benchmark_path", required=True) + parser.add_argument("--task_save_path", 
default="lm_eval/tasks/") + return parser.parse_args() + + +if __name__ == "__main__": + args = parse_args() + + with open(args.benchmark_path, encoding="utf-8") as file: + TASK_LIST = yaml.full_load(file) + for task in tqdm(TASK_LIST): + eval_logger.info(f"Processing {task}") + + dataset_name = task["dataset_path"] + if "dataset_name" in task: + subset_name = task["dataset_name"] + file_subdir = f"{dataset_name}/{subset_name}" + else: + subset_name = None + file_subdir = f"{dataset_name}" + + file_path = os.path.join(args.task_save_path, file_subdir, "promptsource/") + + os.makedirs(file_path, exist_ok=True) + + if subset_name is None: + prompts = DatasetTemplates(dataset_name=dataset_name) + else: + prompts = DatasetTemplates( + dataset_name=dataset_name, subset_name=subset_name + ) + + for idx, prompt_name in enumerate(prompts.all_template_names): + full_file_name = f"promptsource_{idx}.yaml" + config_dict = { + "group": args.benchmark_name, + "include": "promptsource_template.yaml", + "use_prompts": f"promptsource:{prompt_name}", + } + + file_save_path = os.path.join(file_path, full_file_name) + eval_logger.info(f"Save to {file_save_path}") + with open(file_save_path, "w", encoding="utf-8") as yaml_file: + yaml.dump(config_dict, yaml_file) diff --git a/lm-evaluation/scripts/clean_training_data/__init__.py b/lm-evaluation/scripts/clean_training_data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lm-evaluation/scripts/clean_training_data/compress_and_package.py b/lm-evaluation/scripts/clean_training_data/compress_and_package.py new file mode 100644 index 0000000000000000000000000000000000000000..d4af5ba5f3d5e16a485984ced2324951e56ad829 --- /dev/null +++ b/lm-evaluation/scripts/clean_training_data/compress_and_package.py @@ -0,0 +1,73 @@ +import argparse +import glob +import logging +import os +import shutil +import subprocess + +from tqdm import tqdm +from tqdm_multiprocess 
import TqdmMultiProcessPool +from tqdm_multiprocess.logger import setup_logger_tqdm + + +logger = logging.getLogger(__name__) + + +def process_task( + working_directory, output_directory, bucket_file_path, tqdm_func, global_tqdm +): + command = f"zstd {bucket_file_path}" + logger.info(command) + subprocess.call(command, shell=True) + + compressed_file = bucket_file_path + ".zst" + if output_directory: + shutil.move(compressed_file, output_directory) + + os.remove(bucket_file_path) + global_tqdm.update() + + +def compress_and_move(working_directory, output_directory, process_count): + os.makedirs(output_directory, exist_ok=True) + original_info_file_path = os.path.join(working_directory, "info.json") + assert os.path.exists(original_info_file_path) + + tasks = [] + bucket_file_paths = glob.glob( + os.path.join(working_directory, "output", "*.bkt.txt.sorted") + ) + for bucket_file_path in bucket_file_paths: + task = (process_task, (working_directory, output_directory, bucket_file_path)) + tasks.append(task) + + pool = TqdmMultiProcessPool(process_count) + + def on_done(_): + return None + + def on_error(_): + return None + + global_progress = tqdm( + total=len(bucket_file_paths), dynamic_ncols=True, unit="file" + ) + _ = pool.map(global_progress, tasks, on_error, on_done) + + shutil.copy(original_info_file_path, os.path.join(output_directory, "info.json")) + + +parser = argparse.ArgumentParser(description="sort 13gram buckets") +parser.add_argument("-dir", "--working_directory", required=True) +parser.add_argument("-output", "--output_directory", required=True) +parser.add_argument("-procs", "--process_count", type=int, default=8) + +if __name__ == "__main__": + version = 1.00 + print(f"Running version {version}") + + logfile_path = "compress_and_package.log" + setup_logger_tqdm(logfile_path) + + args = parser.parse_args() + compress_and_move(args.working_directory, args.output_directory, args.process_count) diff --git 
a/lm-evaluation/scripts/clean_training_data/investigate_pile.py b/lm-evaluation/scripts/clean_training_data/investigate_pile.py new file mode 100644 index 0000000000000000000000000000000000000000..681b591ced535dbb884fb65f58a0c9042c35b0ac --- /dev/null +++ b/lm-evaluation/scripts/clean_training_data/investigate_pile.py @@ -0,0 +1,95 @@ +import glob +import json +import os +from functools import reduce + +import tqdm +from tqdm_multiprocess import TqdmMultiProcessPool + +from lm_eval.decontamination.archiver import Reader + + +def get_file_stats(file_path, tqdm_func, global_tqdm): + reader = Reader() + total_documents = 0 + total_size = 0 + update_frequency = 10000 + current_file_position = 0 + + with tqdm_func( + total=os.path.getsize(file_path), dynamic_ncols=True, unit="byte", unit_scale=1 + ) as progress: + for document in reader.read(file_path, get_meta=True): + total_size += len(document) + total_documents += 1 + + if total_documents % update_frequency == 0: + new_file_pos = reader.fh.tell() + bytes_read = new_file_pos - current_file_position + current_file_position = new_file_pos + progress.update(bytes_read) + global_tqdm.update(bytes_read) + + return (total_documents, total_size) + + +def get_files(): + directory = "pile" + files = list(sorted(glob.glob(os.path.join(directory, "*.jsonl.zst*")))) + print(files) + return files + + +def get_stats(): + files = get_files() + total_size_bytes = sum(map(lambda x: os.path.getsize(x), files)) + + pool = TqdmMultiProcessPool(4) + global_tqdm = tqdm.tqdm( + total=total_size_bytes, dynamic_ncols=True, unit="byte", unit_scale=1 + ) + + # Generate minhashes with pool + tasks = [(get_file_stats, (file,)) for file in files] + + def on_done(_): + return None + + def on_error(_): + return None + + results = pool.map(global_tqdm, tasks, on_error, on_done) + + total_documents, total_size = reduce( + lambda x, y: (x[0] + y[0], x[1] + y[1]), results + ) + + start_offsets = [] + current_offset = 0 + for file_document_count, _ in 
results: + start_offsets.append(current_offset) + current_offset += file_document_count + + return (total_documents, total_size, start_offsets) + + +if __name__ == "__main__": + version = 1.01 + print(f"Running version {version}") + + stats_file_path = "pile_statistics.json" + if os.path.exists(stats_file_path): + stats = json.load(open(stats_file_path, "r", encoding="utf-8")) + else: + document_count, total_document_size_chars, start_offsets = get_stats() + stats = { + "Data": "Pile statistics", + "Document Count": document_count, + "Total Pile Characters": total_document_size_chars, + "File Start Offsets": start_offsets, + } + json.dump(stats, open(stats_file_path, "w", encoding="utf-8"), indent=4) + + print(f"document_count: {stats['Document Count']}") + print(f"total_chars: {stats['Total Pile Characters']}") + print(f"start_offsets: {stats['File Start Offsets']}") diff --git a/lm-evaluation/scripts/clean_training_data/process_sorted_buckets.py b/lm-evaluation/scripts/clean_training_data/process_sorted_buckets.py new file mode 100644 index 0000000000000000000000000000000000000000..9d345d8e86f409495b95a73f4539b2f4df57af70 --- /dev/null +++ b/lm-evaluation/scripts/clean_training_data/process_sorted_buckets.py @@ -0,0 +1,129 @@ +""" +Processes each sorted bucket, creating a new file listing all ngrams that matched more then 10 +unique documents with their unique document counts. Uses multiprocessing and very little memory +as we stream from presorted buckets. Will use a lot of disk though. + +Arguments +--------- +--working_directory (-dir) + Directory containing the sorted buckets, processed files will be deposited here. Default: current directory +--move_dir (-move) + Directory to move processed 13grams too. Default: Do nothing +--process_count (-procs) + Number of processes to use. 
Default: 4 +""" + +import argparse +import glob +import logging +import os +import re +import shutil +from pathlib import Path + +from tqdm import tqdm +from tqdm_multiprocess import TqdmMultiProcessPool +from tqdm_multiprocess.logger import setup_logger_tqdm + +from scripts.clean_training_data.archiver import TextArchive, TextReader + + +logger = logging.getLogger(__name__) + + +# Multiprocessed +def process_bucket( + bucket_file_path, processed_directory, move_dir, tqdm_func, global_tqdm +): + bucket_id = re.sub("\D", "", os.path.basename(bucket_file_path)) # noqa: W605 + done_file = os.path.join( + processed_directory, f"ngram_bucket_processing_{bucket_id}.done" + ) + if os.path.exists(done_file): + logger.info(f"bucket {bucket_id} already processed, skipping") + return + + # For managing tqdm + file_size = os.path.getsize(bucket_file_path) + bucket_progress = tqdm_func( + total=file_size, dynamic_ncols=True, unit="byte", unit_scale=1 + ) + current_file_position = 0 + update_frequency = 100 * 1000000 # 100mb + update_counter = 0 + + # Iterate through and output ngrams which occur in more then 10 documents + bucket = TextReader(bucket_file_path) + + output_file_path = bucket_file_path + ".processed" + output_archive = TextArchive(output_file_path, mode="wb") + + current_ngram = "" + current_ngram_document_ids = set() + for line in bucket.read(): + [ngram, document_id] = line.rsplit(" ", 1) + + # Write ngram if more then 10 unique document occurrences + if ngram != current_ngram: + if len(current_ngram_document_ids) > 10: + output_archive.add_data( + f"{current_ngram} {len(current_ngram_document_ids)}" + ) + current_ngram = ngram + current_ngram_document_ids = set() + + current_ngram_document_ids.add(document_id) + + # Update tqdm + update_counter += bucket.fh.tell() - current_file_position + current_file_position = bucket.fh.tell() + if update_counter > update_frequency: + bucket_progress.update(update_counter) + update_counter = 0 + + # Remainder + if 
len(current_ngram_document_ids) > 10: + output_archive.add_data(f"{current_ngram} {len(current_ngram_document_ids)}") + + output_archive.commit() + Path(done_file).touch() + + if move_dir: + shutil.move(output_file_path, move_dir) + + global_tqdm.update() + + +def process_sorted_buckets(working_directory, move_dir, process_count): + bucket_file_paths = glob.glob(os.path.join(working_directory, "*.bkt.txt.sorted")) + processed_directory = os.path.join(working_directory, "processed") + os.makedirs(processed_directory, exist_ok=True) + + pool = TqdmMultiProcessPool(process_count) + tasks = [ + (process_bucket, (bucket_file, processed_directory, move_dir)) + for bucket_file in bucket_file_paths + ] + + global_tqdm = tqdm(total=len(bucket_file_paths), dynamic_ncols=True, unit="bucket") + + def on_done(_): + return None + + def on_error(_): + return None + + _ = pool.map(global_tqdm, tasks, on_error, on_done) + + +parser = argparse.ArgumentParser(description="Process 13 grams from sorted buckets.") +parser.add_argument("-dir", "--working_directory", default="") +parser.add_argument("-move", "--move_dir", default="") +parser.add_argument("-procs", "--process_count", type=int, default=4) + +if __name__ == "__main__": + logfile_path = "process13grams.log" + setup_logger_tqdm(logfile_path) + + args = parser.parse_args() + process_sorted_buckets(args.working_directory, args.move_dir, args.process_count) diff --git a/lm-evaluation/scripts/get_prompts.py b/lm-evaluation/scripts/get_prompts.py new file mode 100644 index 0000000000000000000000000000000000000000..d262ec37e40f229c2009f9f162cc58834291de12 --- /dev/null +++ b/lm-evaluation/scripts/get_prompts.py @@ -0,0 +1,25 @@ +from itertools import islice + +from lm_eval import tasks + + +ct = 3 + +for ( + tname, + Task, +) in tasks.TASK_REGISTRY.items(): # [('record', tasks.superglue.ReCoRD)]:# + task = Task() + + print("#", tname) + docs = islice( + task.validation_docs() if task.has_validation_docs() else task.test_docs(), ct + 
) + print() + for i in range(ct): + print() + doc = next(docs) + print("**Context**:", "\n```\n" + task.doc_to_text(doc) + "\n```\n") + print() + print("**Target**:", "\n```\n" + task.doc_to_target(doc) + "\n```\n") + print() diff --git a/lm-evaluation/scripts/make_gpt2_test_cases.py b/lm-evaluation/scripts/make_gpt2_test_cases.py new file mode 100644 index 0000000000000000000000000000000000000000..0c1a4bffe03ef057c331dc9a20c0a5eadb46be66 --- /dev/null +++ b/lm-evaluation/scripts/make_gpt2_test_cases.py @@ -0,0 +1,48 @@ +import random + +import torch +import torch.nn.functional as F +import transformers + + +random.seed(42) + + +data = [ + "A multilayer perceptron (MLP) is a class of feedforward artificial neural network (ANN)", + "The term MLP is used ambiguously, sometimes loosely to any feedforward ANN, sometimes strictly to refer to networks composed of multiple layers of perceptrons (with threshold activation); see § Terminology", + 'Multilayer perceptrons are sometimes colloquially referred to as "vanilla" neural networks, especially when they have a single hidden layer.[1]', + "An MLP consists of at least three layers of nodes: an input layer, a hidden layer and an output layer. Except for the input nodes, each node is a neuron that uses a nonlinear activation function.", + "MLP utilizes a supervised learning technique called backpropagation for training.[2][3] Its multiple layers and non-linear activation distinguish MLP from a linear perceptron. It can distinguish data that is not linearly separable.[4]", + "Recent work has demonstrated substantial gains on many NLP tasks and benchmarks by pre-training on a large corpus of text followed by fine-tuning on a specific task. While typically task-agnostic in architecture, this method still requires task-specific fine-tuning datasets of thousands or tens of thousands of examples. 
By contrast, humans can generally perform a new language task from only a few examples or from simple instructions - something which current NLP systems still largely struggle to do. Here we show that scaling up language models greatly improves task-agnostic, few-shot performance, sometimes even reaching competitiveness with prior state-of-the-art fine-tuning approaches. ", + "Specifically, we train GPT-3, an autoregressive language model with 175 billion parameters, 10x more than any previous non-sparse language model, and test its performance in the few-shot setting. For all tasks, GPT-3 is applied without any gradient updates or fine-tuning, with tasks and few-shot demonstrations specified purely via text interaction with the model. GPT-3 achieves strong performance on many NLP datasets, including translation, question-answering, and cloze tasks, as well as several tasks that require on-the-fly reasoning or domain adaptation, such as unscrambling words, using a novel word in a sentence, or performing 3-digit arithmetic. At the same time, we also identify some datasets where GPT-3's few-shot learning still struggles, as well as some datasets where GPT-3 faces methodological issues related to training on large web corpora. Finally, we find that GPT-3 can generate samples of news articles which human evaluators have difficulty distinguishing from articles written by humans. 
We discuss broader societal impacts of this finding and of GPT-3 in general.", + "A multilayer perceptron (MLP) is a class of feedforward artificial neural network (ANN)", + "Hello World", +] + + +model = transformers.GPT2LMHeadModel.from_pretrained("gpt2") +tok = transformers.GPT2Tokenizer.from_pretrained("gpt2") + +tgs = [] + +for dat in data: + random.seed(dat) + # print(model(tok.encode(dat, return_tensors="pt"))[0][0]) + + toks = tok.encode(dat, return_tensors="pt") + ind = random.randrange(len(toks[0]) - 1) + logits = F.log_softmax(model(toks)[0], dim=-1)[:, :-1] # [batch, seq, vocab] + + res = torch.gather(logits, 2, toks[:, 1:].unsqueeze(-1)).squeeze(-1)[0] + + tgs.append(float(res[ind:].sum())) + print( + r'("""' + + tok.decode(toks[0, : ind + 1]) + + r'""", """' + + tok.decode(toks[0, ind + 1 :]) + + r'"""), ' + ) + +print(tgs) diff --git a/lm-evaluation/scripts/make_table_tasks.py b/lm-evaluation/scripts/make_table_tasks.py new file mode 100644 index 0000000000000000000000000000000000000000..0c8c44bc656c613df4a1cd9f7eca97638607a645 --- /dev/null +++ b/lm-evaluation/scripts/make_table_tasks.py @@ -0,0 +1,54 @@ +""" +Usage: + python make_table_tasks.py --output +""" +import argparse +import logging + +from pytablewriter import MarkdownTableWriter + +from lm_eval import tasks + + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +def check(tf): + if tf: + return "✓" + else: + return " " + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--output", type=str, default="task_table.md") + args = parser.parse_args() + + writer = MarkdownTableWriter() + writer.headers = ["Task Name", "Train", "Val", "Test", "Val/Test Docs", "Metrics"] + values = [] + + tasks = tasks.TASK_REGISTRY.items() + tasks = sorted(tasks, key=lambda x: x[0]) + for tname, Task in tasks: + task = Task() + v = [ + tname, + check(task.has_training_docs()), + check(task.has_validation_docs()), + check(task.has_test_docs()), 
+ len( + list( + task.test_docs() if task.has_test_docs() else task.validation_docs() + ) + ), + ", ".join(task.aggregation().keys()), + ] + logger.info(v) + values.append(v) + writer.value_matrix = values + table = writer.dumps() + with open(args.output, "w", encoding="utf-8") as f: + f.write(table) diff --git a/lm-evaluation/scripts/model_comparator.py b/lm-evaluation/scripts/model_comparator.py new file mode 100644 index 0000000000000000000000000000000000000000..26fe53961ab4972980fd55e30f89027a3098f8f2 --- /dev/null +++ b/lm-evaluation/scripts/model_comparator.py @@ -0,0 +1,138 @@ +import argparse +import os +from typing import Dict, List, Tuple + +import numpy as np +import pandas as pd +import scipy.stats +import torch + +import lm_eval.evaluator +import lm_eval.models.utils +from lm_eval import tasks, utils + + +os.environ["TOKENIZERS_PARALLELISM"] = "false" +eval_logger = utils.eval_logger + + +def memory_stats(): + eval_logger.info( + f"Memory allocated: {torch.cuda.memory_allocated() / 1024 ** 2}, reserved: {torch.cuda.memory_reserved() // 1024 ** 2}" + ) + + +def calculate_z_value(res1: Dict, res2: Dict) -> Tuple[float, float]: + acc1, acc2 = res1["acc,none"], res2["acc,none"] + st_err1, st_err2 = res1["acc_stderr,none"], res2["acc_stderr,none"] + Z = (acc1 - acc2) / np.sqrt((st_err1**2) + (st_err2**2)) + # Determining the p-value + p_value = 2 * scipy.stats.norm.sf(abs(Z)) # two-tailed test + return Z, p_value + + +def print_results( + data_to_print: List = None, results_dict: Dict = None, alpha: float = None +): + model1_data = data_to_print[0] + model2_data = data_to_print[1] + table_data = [] + for task in model1_data.keys(): + row = { + "Task": task, + "HF Accuracy": model1_data[task]["acc,none"], + "vLLM Accuracy": model2_data[task]["acc,none"], + "HF StdErr": model1_data[task]["acc_stderr,none"], + "vLLM StdErr": model2_data[task]["acc_stderr,none"], + } + table_data.append(row) + comparison_df = pd.DataFrame(table_data) + comparison_df["Z-Score"] 
= comparison_df["Task"].apply( + lambda task: results_dict[task]["z"] + ) + comparison_df["P-Value"] = comparison_df["Task"].apply( + lambda task: results_dict[task]["p_value"] + ) + comparison_df[f"p > {alpha}"] = comparison_df["P-Value"].apply( + lambda p: "✓" if p > alpha else "×" + ) + return comparison_df + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--pretrained", default="EleutherAI/pythia-70m", help="name of model to compare" + ) + parser.add_argument( + "--hf_args", help="huggingface model args =", default="" + ) + parser.add_argument("--vllm_args", help="vllm model args =", default="") + parser.add_argument("--tasks", type=str, default="arc_easy,hellaswag") + parser.add_argument( + "--limit", + type=float, + default=100, + ) + parser.add_argument( + "--alpha", + type=float, + default=0.05, + help="Significance level for two-tailed z-test", + ) + parser.add_argument( + "--device", + type=str, + default="cuda", + ) + parser.add_argument( + "--batch", + type=str, + default=8, + ) + parser.add_argument( + "--verbosity", + type=str, + default="INFO", + help="Logging verbosity", + ) + return parser.parse_args() + + +if __name__ == "__main__": + tasks.initialize_tasks() + args = parse_args() + tasks = args.tasks.split(",") + print(tasks) + hf_args, vllm_args = "," + args.hf_args, "," + args.vllm_args + results_vllm = lm_eval.evaluator.simple_evaluate( + model="vllm", + model_args=f"pretrained={args.pretrained}" + vllm_args, + tasks=tasks, + limit=args.limit, + device=args.device, + batch_size=args.batch, + ) + memory_stats() + lm_eval.models.utils.clear_torch_cache() + eval_logger.info("Memory stats cleared") + memory_stats() + results_hf = lm_eval.evaluator.simple_evaluate( + model="hf", + model_args=f"pretrained={args.pretrained}" + hf_args, + tasks=tasks, + limit=args.limit, + device=args.device, + batch_size=args.batch, + ) + all_res = {} + for task1, task2 in zip( + results_hf["results"].items(), 
results_vllm["results"].items() + ): + assert task1[0] == task2[0] + z, p_value = calculate_z_value(task1[1], task2[1]) + all_res[task1[0]] = {"z": z, "p_value": p_value} + df = print_results( + [results_hf["results"], results_vllm["results"]], all_res, args.alpha + ) + print(df) diff --git a/lm-evaluation/scripts/regression.py b/lm-evaluation/scripts/regression.py new file mode 100644 index 0000000000000000000000000000000000000000..75258dcb640a4f32a0011e864d390e9619f6e2e3 --- /dev/null +++ b/lm-evaluation/scripts/regression.py @@ -0,0 +1,199 @@ +import argparse +import json +import os +import subprocess +import time +from pathlib import Path + +from lm_eval import utils +from lm_eval.api.registry import ALL_TASKS + + +seq2seq_models = ["google/flan-t5-small"] +causal_models = [ + "gpt2", + "facebook/opt-125m", + "EleutherAI/gpt-neo-125m", + "EleutherAI/pythia-160m", +] +model_names = seq2seq_models + causal_models + + +completion_tasks = ["boolq", "lambada_openai", "winogrande"] +choice_tasks = ["hellaswag", "openbookqa", "piqa"] +perplexity_tasks = ["wikitext"] +generation_tasks = [] +task_names = completion_tasks + choice_tasks + perplexity_tasks + generation_tasks + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--branches", default=[]) + parser.add_argument("--models", default=model_names) + parser.add_argument("--tasks", default=task_names) + parser.add_argument("--acc_norm", type=bool, default=False) + parser.add_argument("--perplexity", default=None) + # TODO: implement num_fewshot and limit per task, e.g. 
task1:5,task2:1:100,task3::1000 + parser.add_argument("--num_fewshot", type=int, default=0) + parser.add_argument("--limit", type=float, default=None) + # TODO: implement hf-auto to pick between causal and seq2seq models so we don't need this + parser.add_argument("--model", default="hf-causal") + # Use whatever is faster here + parser.add_argument("--model_args", default="use_accelerate=True,load_in_8bit=True") + parser.add_argument("--batch_size", default="auto") + return parser.parse_args() + + +def eval_models(args, branch=None): + if branch is not None: + if os.system(f"git checkout {branch}") != 0: + return {}, 0 + + branch = branch or initial_branch + + start_time = time.time() + + results = {} + + for model in args.models: + model_type = ( + "hf-causal" + if model in causal_models + else "hf-seq2seq" + if model in seq2seq_models + else args.model + ) + model_args = f"pretrained={model},{args.model_args}" + # TODO: split_and_pad_windows in AutoSeq2SeqLM doesn"t exist, #527 + tasks = ( + args.tasks + if model in causal_models or model_type == "hf-causal" + else list(filter(lambda task: task not in perplexity_tasks, args.tasks)) + ) + # TODO: OOM with auto for seq2seq models, also can OOM with llama + batch_size = ( + args.batch_size + if model in causal_models or model_type == "hf-causal" + else 64 + if args.batch_size == "auto" + else args.batch_size + ) + output_path = ( + f"data/regression/{int(start_time)}-{branch}-{Path(model).name}.json" + ) + + command = ( + f"python3 main.py --model {model_type} --model_args {model_args} --tasks {','.join(tasks)} " + f"--num_fewshot {args.num_fewshot}{'' if args.limit is None else f' --limit {args.limit}'} " + f"--batch_size {batch_size} --no_cache --output_path {output_path}" + ) + + print( + f"{'=' * 80}\nEvaluating {model} on {', '.join(tasks)} at {branch} with:\n\n{command}\n{'=' * 80}" + ) + + ret = os.system(command) + + results[model] = ( + json.load(open(output_path, encoding="utf-8")) + if ret == 0 + else 
{"results": {}} + ) + + end_time = time.time() + + return results, end_time - start_time + + +def extract_value(args, results, model, task, err=False): + if model not in results: + return 0 + results = results[model]["results"] + if task not in results: + return 0 + results = results[task] + if args.acc_norm and "acc_norm,none" in results: + return results["acc_norm,none"] if not err else results["acc_norm_stderr,none"] + if "acc,none" in results: + return results["acc,none"] if not err else results["acc_stderr,none"] + if (args.perplexity or "word_perplexity") + ",none" in results: + return ( + results[(args.perplexity or "word_perplexity") + ",none"] if not err else 0 + ) + return 0 + + +def format_value(args, results, model, task): + val = 100 * extract_value(args, results, model, task) + err = 100 * extract_value(args, results, model, task, err=True) + return f"{val:.2f}{f' ± {err:.2f}' if err != 0 else ''}" + + +def format_diff(args, results1, results2, model, task): + val1 = 100 * extract_value(args, results1, model, task) + val2 = 100 * extract_value(args, results2, model, task) + diff = val2 - val1 + return f"**+{diff:.2f}**" if diff > 0 else f"{diff:.2f}" + + +def main(): + args = parse_args() + + args.branches = ( + args.branches.split(",") if isinstance(args.branches, str) else args.branches + ) + args.models = ( + args.models.split(",") if isinstance(args.models, str) else args.models + ) + args.tasks = ( + ALL_TASKS + if args.tasks == "all_tasks" + else utils.pattern_match(args.tasks.split(","), ALL_TASKS) + if isinstance(args.tasks, str) + else args.tasks + ) + + global initial_branch + initial_branch = ( + subprocess.check_output("git branch --show-current", shell=True) + .decode("ascii") + .strip() + ) + + # TODO: implement proper timing for each task + # TODO: reduce IO by sharing tasks between models? 
+ + results, runtime = eval_models(args) + print(results, runtime) + + runs = [] + for branch in args.branches: + runs.append((branch, *eval_models(args, branch))) + + os.system(f"git checkout {initial_branch}") + + print("") + print(f"|task|{'|'.join(map(lambda model: Path(model).name, args.models))}|") + print(f"|--|{'--|' * len(args.models)}") + for task in args.tasks: + print( + f"|{task} ({initial_branch})|{'|'.join(map(lambda model: format_value(args, results, model, task), args.models))}|" + ) + for branch, branch_results, branch_runtime in runs: + print( + f"|{task} ({branch})|{'|'.join(map(lambda model: format_value(args, branch_results, model, task), args.models))}|" + ) + print( + f"|{task} (diff)|{'|'.join(map(lambda model: format_diff(args, results, branch_results, model, task), args.models))}|" + ) + + print("") + print("|branch|runtime|%|") + print("|--|--|--|") + print(f"|{initial_branch}|{runtime:.1f}s|100%|") + for branch, _, branch_runtime in runs: + print(f"|{branch}|{branch_runtime:.1f}s|{100 * branch_runtime / runtime:.2f}%|") + + +if __name__ == "__main__": + main() diff --git a/lm-evaluation/scripts/zeno_visualize.py b/lm-evaluation/scripts/zeno_visualize.py new file mode 100644 index 0000000000000000000000000000000000000000..5771f9d7a6d650441720e6bd54712c5dbe450262 --- /dev/null +++ b/lm-evaluation/scripts/zeno_visualize.py @@ -0,0 +1,219 @@ +import argparse +import json +import os +import re +from pathlib import Path + +import pandas as pd +from zeno_client import ZenoClient, ZenoMetric + +from lm_eval.utils import eval_logger + + +def parse_args(): + parser = argparse.ArgumentParser( + description="Upload your data to the Zeno AI evaluation platform to visualize results. This requires a ZENO_API_KEY in your environment variables. The eleuther harness must be run with log_samples=True and an output_path set for data to be written to disk." 
+ ) + parser.add_argument( + "--data_path", + required=True, + help="Where to find the results of the benchmarks that have been run. Uses the name of each subfolder as the model name.", + ) + parser.add_argument( + "--project_name", + required=True, + help="The name of the generated Zeno project.", + ) + return parser.parse_args() + + +def main(): + """Upload the results of your benchmark tasks to the Zeno AI evaluation platform. + + This scripts expects your results to live in a data folder where subfolders contain results of individual models. + """ + args = parse_args() + + client = ZenoClient(os.environ["ZENO_API_KEY"]) + + # Get all model subfolders from the parent data folder. + models = [ + os.path.basename(os.path.normpath(f)) + for f in os.scandir(Path(args.data_path)) + if f.is_dir() + ] + + assert len(models) > 0, "No model directories found in the data_path." + + tasks = set(tasks_for_model(models[0], args.data_path)) + + for model in models: # Make sure that all models have the same tasks. + old_tasks = tasks.copy() + task_count = len(tasks) + + model_tasks = tasks_for_model(model, args.data_path) + tasks.intersection(set(model_tasks)) + + if task_count != len(tasks): + eval_logger.warning( + f"All models must have the same tasks. {model} has tasks: {model_tasks} but have already recorded tasks: {old_tasks}. Taking intersection {tasks}" + ) + + assert ( + len(tasks) > 0 + ), "Must provide at least one task in common amongst models to compare." 
+ + for task in tasks: + # Upload data for all models + for model_index, model in enumerate(models): + model_args = re.sub( + r"[\"<>:/\|\\?\*\[\]]+", + "__", + json.load( + open(Path(args.data_path, model, "results.json"), encoding="utf-8") + )["config"]["model_args"], + ) + with open( + Path(args.data_path, model, f"{model_args}_{task}.jsonl"), + "r", + encoding="utf-8", + ) as file: + data = json.loads(file.read()) + + configs = json.load( + open(Path(args.data_path, model, "results.json"), encoding="utf-8") + )["configs"] + config = configs[task] + + if model_index == 0: # Only need to assemble data for the first model + metrics = [] + for metric in config["metric_list"]: + metrics.append( + ZenoMetric( + name=metric["metric"], + type="mean", + columns=[metric["metric"]], + ) + ) + project = client.create_project( + name=args.project_name + (f"_{task}" if len(tasks) > 1 else ""), + view="text-classification", + metrics=metrics, + ) + project.upload_dataset( + generate_dataset(data, config), + id_column="id", + data_column="data", + label_column="labels", + ) + + project.upload_system( + generate_system_df(data, config), + name=model, + id_column="id", + output_column="output", + ) + + +def tasks_for_model(model: str, data_path: str): + """Get the tasks for a specific model. + + Args: + model (str): The name of the model. + data_path (str): The path to the data. + + Returns: + list: A list of tasks for the model. + """ + dir_path = Path(data_path, model) + config = ( + json.load(open(Path(dir_path, "results.json"), encoding="utf-8"))["configs"], + ) + return list(config[0].keys()) + + +def generate_dataset( + data, + config, +): + """Generate a Zeno dataset from evaluation data. + + Args: + data: The data to generate a dataset for. + config: The configuration of the task. + + Returns: + pd.Dataframe: A dataframe that is ready to be uploaded to Zeno. 
+ """ + ids = [x["doc_id"] for x in data] + labels = [x["target"] for x in data] + instance = [""] * len(ids) + + if config["output_type"] == "loglikelihood": + instance = [x["arguments"][0][0] for x in data] + labels = [x["arguments"][0][1] for x in data] + elif config["output_type"] == "multiple_choice": + instance = [ + x["arguments"][0][0] + + "\n\n" + + "\n".join([f"- {y[1]}" for y in x["arguments"]]) + for x in data + ] + elif config["output_type"] == "loglikelihood_rolling": + instance = [x["arguments"][0][0] for x in data] + elif config["output_type"] == "generate_until": + instance = [x["arguments"][0][0] for x in data] + + return pd.DataFrame( + { + "id": ids, + "data": instance, + "input_len": [len(x) for x in instance], + "labels": labels, + "output_type": config["output_type"], + } + ) + + +def generate_system_df(data, config): + """Generate a dataframe for a specific system to be uploaded to Zeno. + + Args: + data: The data to generate a dataframe from. + config: The configuration of the task. + + Returns: + pd.Dataframe: A dataframe that is ready to be uploaded to Zeno as a system. 
+ """ + ids = [x["doc_id"] for x in data] + system_dict = {"id": ids} + system_dict["output"] = [""] * len(ids) + + if config["output_type"] == "loglikelihood": + system_dict["output"] = [ + "correct" if x["filtered_resps"][0][1] is True else "incorrect" + for x in data + ] + elif config["output_type"] == "multiple_choice": + system_dict["output"] = [ + ", ".join([str(y[0]) for y in x["filtered_resps"]]) for x in data + ] + system_dict["num_answers"] = [len(x["filtered_resps"]) for x in data] + elif config["output_type"] == "loglikelihood_rolling": + system_dict["output"] = [str(x["filtered_resps"][0]) for x in data] + elif config["output_type"] == "generate_until": + system_dict["output"] = [str(x["filtered_resps"][0]) for x in data] + system_dict["output_length"] = [len(str(x["filtered_resps"][0])) for x in data] + + metrics = {} + for metric in config["metric_list"]: + if "aggregation" in metric and metric["aggregation"] == "mean": + metrics[metric["metric"]] = [x[metric["metric"]] for x in data] + + system_dict.update(metrics) + system_df = pd.DataFrame(system_dict) + return system_df + + +if __name__ == "__main__": + main() diff --git a/lm-evaluation/templates/new_yaml_task/README.md b/lm-evaluation/templates/new_yaml_task/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3028712286ef77e503c97ffbe5ce39c7ec7b14e2 --- /dev/null +++ b/lm-evaluation/templates/new_yaml_task/README.md @@ -0,0 +1,42 @@ +# Task-name + +### Paper + +Title: `paper titles goes here` + +Abstract: `link to paper PDF or arXiv abstract goes here` + +`Short description of paper / benchmark goes here:` + +Homepage: `homepage to the benchmark's website goes here, if applicable` + + +### Citation + +``` +BibTeX-formatted citation goes here +``` + +### Groups and Tasks + +#### Groups + +* `group_name`: `Short description` + +#### Tasks + +* `task_name`: `1-sentence description of what this particular task does` +* `task_name2`: ... 
+ +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation/templates/new_yaml_task/blank_yaml.yaml b/lm-evaluation/templates/new_yaml_task/blank_yaml.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391