diff --git a/env-llmeval/bin/Activate.ps1 b/env-llmeval/bin/Activate.ps1 new file mode 100644 index 0000000000000000000000000000000000000000..b49d77ba44b24fe6d69f6bbe75139b3b5dc23075 --- /dev/null +++ b/env-llmeval/bin/Activate.ps1 @@ -0,0 +1,247 @@ +<# +.Synopsis +Activate a Python virtual environment for the current PowerShell session. + +.Description +Pushes the python executable for a virtual environment to the front of the +$Env:PATH environment variable and sets the prompt to signify that you are +in a Python virtual environment. Makes use of the command line switches as +well as the `pyvenv.cfg` file values present in the virtual environment. + +.Parameter VenvDir +Path to the directory that contains the virtual environment to activate. The +default value for this is the parent of the directory that the Activate.ps1 +script is located within. + +.Parameter Prompt +The prompt prefix to display when this virtual environment is activated. By +default, this prompt is the name of the virtual environment folder (VenvDir) +surrounded by parentheses and followed by a single space (ie. '(.venv) '). + +.Example +Activate.ps1 +Activates the Python virtual environment that contains the Activate.ps1 script. + +.Example +Activate.ps1 -Verbose +Activates the Python virtual environment that contains the Activate.ps1 script, +and shows extra information about the activation as it executes. + +.Example +Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv +Activates the Python virtual environment located in the specified location. + +.Example +Activate.ps1 -Prompt "MyPython" +Activates the Python virtual environment that contains the Activate.ps1 script, +and prefixes the current prompt with the specified string (surrounded in +parentheses) while the virtual environment is active. + +.Notes +On Windows, it may be required to enable this Activate.ps1 script by setting the +execution policy for the user. 
You can do this by issuing the following PowerShell +command: + +PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser + +For more information on Execution Policies: +https://go.microsoft.com/fwlink/?LinkID=135170 + +#> +Param( + [Parameter(Mandatory = $false)] + [String] + $VenvDir, + [Parameter(Mandatory = $false)] + [String] + $Prompt +) + +<# Function declarations --------------------------------------------------- #> + +<# +.Synopsis +Remove all shell session elements added by the Activate script, including the +addition of the virtual environment's Python executable from the beginning of +the PATH variable. + +.Parameter NonDestructive +If present, do not remove this function from the global namespace for the +session. + +#> +function global:deactivate ([switch]$NonDestructive) { + # Revert to original values + + # The prior prompt: + if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) { + Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt + Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT + } + + # The prior PYTHONHOME: + if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) { + Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME + Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME + } + + # The prior PATH: + if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) { + Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH + Remove-Item -Path Env:_OLD_VIRTUAL_PATH + } + + # Just remove the VIRTUAL_ENV altogether: + if (Test-Path -Path Env:VIRTUAL_ENV) { + Remove-Item -Path env:VIRTUAL_ENV + } + + # Just remove VIRTUAL_ENV_PROMPT altogether. 
+ if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) { + Remove-Item -Path env:VIRTUAL_ENV_PROMPT + } + + # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether: + if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) { + Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force + } + + # Leave deactivate function in the global namespace if requested: + if (-not $NonDestructive) { + Remove-Item -Path function:deactivate + } +} + +<# +.Description +Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the +given folder, and returns them in a map. + +For each line in the pyvenv.cfg file, if that line can be parsed into exactly +two strings separated by `=` (with any amount of whitespace surrounding the =) +then it is considered a `key = value` line. The left hand string is the key, +the right hand is the value. + +If the value starts with a `'` or a `"` then the first and last character is +stripped from the value before being captured. + +.Parameter ConfigDir +Path to the directory that contains the `pyvenv.cfg` file. +#> +function Get-PyVenvConfig( + [String] + $ConfigDir +) { + Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg" + + # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue). + $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue + + # An empty map will be returned if no config file is found. + $pyvenvConfig = @{ } + + if ($pyvenvConfigPath) { + + Write-Verbose "File exists, parse `key = value` lines" + $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath + + $pyvenvConfigContent | ForEach-Object { + $keyval = $PSItem -split "\s*=\s*", 2 + if ($keyval[0] -and $keyval[1]) { + $val = $keyval[1] + + # Remove extraneous quotations around a string value. 
+ if ("'""".Contains($val.Substring(0, 1))) { + $val = $val.Substring(1, $val.Length - 2) + } + + $pyvenvConfig[$keyval[0]] = $val + Write-Verbose "Adding Key: '$($keyval[0])'='$val'" + } + } + } + return $pyvenvConfig +} + + +<# Begin Activate script --------------------------------------------------- #> + +# Determine the containing directory of this script +$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition +$VenvExecDir = Get-Item -Path $VenvExecPath + +Write-Verbose "Activation script is located in path: '$VenvExecPath'" +Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)" +Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)" + +# Set values required in priority: CmdLine, ConfigFile, Default +# First, get the location of the virtual environment, it might not be +# VenvExecDir if specified on the command line. +if ($VenvDir) { + Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values" +} +else { + Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir." + $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/") + Write-Verbose "VenvDir=$VenvDir" +} + +# Next, read the `pyvenv.cfg` file to determine any required value such +# as `prompt`. +$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir + +# Next, set the prompt from the command line, or the config file, or +# just use the name of the virtual environment folder. +if ($Prompt) { + Write-Verbose "Prompt specified as argument, using '$Prompt'" +} +else { + Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value" + if ($pyvenvCfg -and $pyvenvCfg['prompt']) { + Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'" + $Prompt = $pyvenvCfg['prompt']; + } + else { + Write-Verbose " Setting prompt based on parent's directory's name. 
(Is the directory name passed to venv module when creating the virtual environment)" + Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'" + $Prompt = Split-Path -Path $venvDir -Leaf + } +} + +Write-Verbose "Prompt = '$Prompt'" +Write-Verbose "VenvDir='$VenvDir'" + +# Deactivate any currently active virtual environment, but leave the +# deactivate function in place. +deactivate -nondestructive + +# Now set the environment variable VIRTUAL_ENV, used by many tools to determine +# that there is an activated venv. +$env:VIRTUAL_ENV = $VenvDir + +if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) { + + Write-Verbose "Setting prompt to '$Prompt'" + + # Set the prompt to include the env name + # Make sure _OLD_VIRTUAL_PROMPT is global + function global:_OLD_VIRTUAL_PROMPT { "" } + Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT + New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt + + function global:prompt { + Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) " + _OLD_VIRTUAL_PROMPT + } + $env:VIRTUAL_ENV_PROMPT = $Prompt +} + +# Clear PYTHONHOME +if (Test-Path -Path Env:PYTHONHOME) { + Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME + Remove-Item -Path Env:PYTHONHOME +} + +# Add the venv to the PATH +Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH +$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH" diff --git a/env-llmeval/bin/accelerate b/env-llmeval/bin/accelerate new file mode 100644 index 0000000000000000000000000000000000000000..0738059a1edd02569882b4e963f06ff0224d0bdf --- /dev/null +++ b/env-llmeval/bin/accelerate @@ -0,0 +1,8 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from accelerate.commands.accelerate_cli import main +if __name__ == '__main__': + sys.argv[0] = 
re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/env-llmeval/bin/accelerate-config b/env-llmeval/bin/accelerate-config new file mode 100644 index 0000000000000000000000000000000000000000..41e59a7eb7d73f5563d6fa17c8eca6350d11b7d1 --- /dev/null +++ b/env-llmeval/bin/accelerate-config @@ -0,0 +1,8 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from accelerate.commands.config import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/env-llmeval/bin/accelerate-estimate-memory b/env-llmeval/bin/accelerate-estimate-memory new file mode 100644 index 0000000000000000000000000000000000000000..1c33b2085b35217cff14c94c0ca0c9b2c40c7318 --- /dev/null +++ b/env-llmeval/bin/accelerate-estimate-memory @@ -0,0 +1,8 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from accelerate.commands.estimate import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/env-llmeval/bin/accelerate-launch b/env-llmeval/bin/accelerate-launch new file mode 100644 index 0000000000000000000000000000000000000000..fa67bd2d2e29395476cbcf23d518628d44730002 --- /dev/null +++ b/env-llmeval/bin/accelerate-launch @@ -0,0 +1,8 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from accelerate.commands.launch import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/env-llmeval/bin/activate b/env-llmeval/bin/activate new file mode 100644 index 0000000000000000000000000000000000000000..c1efda64569dca18479e247c2ab7ce21da8e4221 --- /dev/null +++ b/env-llmeval/bin/activate @@ -0,0 +1,69 @@ +# This file must be used with "source bin/activate" *from bash* +# you cannot run it directly + +deactivate () { + 
# reset old environment variables + if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then + PATH="${_OLD_VIRTUAL_PATH:-}" + export PATH + unset _OLD_VIRTUAL_PATH + fi + if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then + PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}" + export PYTHONHOME + unset _OLD_VIRTUAL_PYTHONHOME + fi + + # This should detect bash and zsh, which have a hash command that must + # be called to get it to forget past commands. Without forgetting + # past commands the $PATH changes we made may not be respected + if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then + hash -r 2> /dev/null + fi + + if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then + PS1="${_OLD_VIRTUAL_PS1:-}" + export PS1 + unset _OLD_VIRTUAL_PS1 + fi + + unset VIRTUAL_ENV + unset VIRTUAL_ENV_PROMPT + if [ ! "${1:-}" = "nondestructive" ] ; then + # Self destruct! + unset -f deactivate + fi +} + +# unset irrelevant variables +deactivate nondestructive + +VIRTUAL_ENV="/home/sdp/llm_eval/env-llmeval" +export VIRTUAL_ENV + +_OLD_VIRTUAL_PATH="$PATH" +PATH="$VIRTUAL_ENV/bin:$PATH" +export PATH + +# unset PYTHONHOME if set +# this will fail if PYTHONHOME is set to the empty string (which is bad anyway) +# could use `if (set -u; : $PYTHONHOME) ;` in bash +if [ -n "${PYTHONHOME:-}" ] ; then + _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}" + unset PYTHONHOME +fi + +if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then + _OLD_VIRTUAL_PS1="${PS1:-}" + PS1="(env-llmeval) ${PS1:-}" + export PS1 + VIRTUAL_ENV_PROMPT="(env-llmeval) " + export VIRTUAL_ENV_PROMPT +fi + +# This should detect bash and zsh, which have a hash command that must +# be called to get it to forget past commands. 
Without forgetting +# past commands the $PATH changes we made may not be respected +if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then + hash -r 2> /dev/null +fi diff --git a/env-llmeval/bin/activate.csh b/env-llmeval/bin/activate.csh new file mode 100644 index 0000000000000000000000000000000000000000..a19ee01f9742231e9ebf119d77b2390d2cd36c76 --- /dev/null +++ b/env-llmeval/bin/activate.csh @@ -0,0 +1,26 @@ +# This file must be used with "source bin/activate.csh" *from csh*. +# You cannot run it directly. +# Created by Davide Di Blasi . +# Ported to Python 3.3 venv by Andrew Svetlov + +alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate' + +# Unset irrelevant variables. +deactivate nondestructive + +setenv VIRTUAL_ENV "/home/sdp/llm_eval/env-llmeval" + +set _OLD_VIRTUAL_PATH="$PATH" +setenv PATH "$VIRTUAL_ENV/bin:$PATH" + + +set _OLD_VIRTUAL_PROMPT="$prompt" + +if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then + set prompt = "(env-llmeval) $prompt" + setenv VIRTUAL_ENV_PROMPT "(env-llmeval) " +endif + +alias pydoc python -m pydoc + +rehash diff --git a/env-llmeval/bin/activate.fish b/env-llmeval/bin/activate.fish new file mode 100644 index 0000000000000000000000000000000000000000..709c448b01bb305b629f60bdaf1271c69f70e184 --- /dev/null +++ b/env-llmeval/bin/activate.fish @@ -0,0 +1,69 @@ +# This file must be used with "source /bin/activate.fish" *from fish* +# (https://fishshell.com/); you cannot run it directly. 
+ +function deactivate -d "Exit virtual environment and return to normal shell environment" + # reset old environment variables + if test -n "$_OLD_VIRTUAL_PATH" + set -gx PATH $_OLD_VIRTUAL_PATH + set -e _OLD_VIRTUAL_PATH + end + if test -n "$_OLD_VIRTUAL_PYTHONHOME" + set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME + set -e _OLD_VIRTUAL_PYTHONHOME + end + + if test -n "$_OLD_FISH_PROMPT_OVERRIDE" + set -e _OLD_FISH_PROMPT_OVERRIDE + # prevents error when using nested fish instances (Issue #93858) + if functions -q _old_fish_prompt + functions -e fish_prompt + functions -c _old_fish_prompt fish_prompt + functions -e _old_fish_prompt + end + end + + set -e VIRTUAL_ENV + set -e VIRTUAL_ENV_PROMPT + if test "$argv[1]" != "nondestructive" + # Self-destruct! + functions -e deactivate + end +end + +# Unset irrelevant variables. +deactivate nondestructive + +set -gx VIRTUAL_ENV "/home/sdp/llm_eval/env-llmeval" + +set -gx _OLD_VIRTUAL_PATH $PATH +set -gx PATH "$VIRTUAL_ENV/bin" $PATH + +# Unset PYTHONHOME if set. +if set -q PYTHONHOME + set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME + set -e PYTHONHOME +end + +if test -z "$VIRTUAL_ENV_DISABLE_PROMPT" + # fish uses a function instead of an env var to generate the prompt. + + # Save the current fish_prompt function as the function _old_fish_prompt. + functions -c fish_prompt _old_fish_prompt + + # With the original prompt function renamed, we can override with our own. + function fish_prompt + # Save the return status of the last command. + set -l old_status $status + + # Output the venv prompt; color taken from the blue of the Python logo. + printf "%s%s%s" (set_color 4B8BBE) "(env-llmeval) " (set_color normal) + + # Restore the return status of the previous command. + echo "exit $old_status" | . + # Output the original/"old" prompt. 
+ _old_fish_prompt + end + + set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV" + set -gx VIRTUAL_ENV_PROMPT "(env-llmeval) " +end diff --git a/env-llmeval/bin/chardetect b/env-llmeval/bin/chardetect new file mode 100644 index 0000000000000000000000000000000000000000..889f776b2013c9e7868b028a1dd43ad0e482a90c --- /dev/null +++ b/env-llmeval/bin/chardetect @@ -0,0 +1,8 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from chardet.cli.chardetect import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/env-llmeval/bin/convert-caffe2-to-onnx b/env-llmeval/bin/convert-caffe2-to-onnx new file mode 100644 index 0000000000000000000000000000000000000000..0757891ec221c119f76a151ac3a109f9604ae3bd --- /dev/null +++ b/env-llmeval/bin/convert-caffe2-to-onnx @@ -0,0 +1,8 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from caffe2.python.onnx.bin.conversion import caffe2_to_onnx +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(caffe2_to_onnx()) diff --git a/env-llmeval/bin/convert-onnx-to-caffe2 b/env-llmeval/bin/convert-onnx-to-caffe2 new file mode 100644 index 0000000000000000000000000000000000000000..aa65ddbbe02fbcc900a263a332da44252b1193fb --- /dev/null +++ b/env-llmeval/bin/convert-onnx-to-caffe2 @@ -0,0 +1,8 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from caffe2.python.onnx.bin.conversion import onnx_to_caffe2 +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(onnx_to_caffe2()) diff --git a/env-llmeval/bin/datasets-cli b/env-llmeval/bin/datasets-cli new file mode 100644 index 0000000000000000000000000000000000000000..edfa558136b523a56faa51e4a1f23ce41f4842de --- /dev/null +++ b/env-llmeval/bin/datasets-cli @@ -0,0 +1,8 @@ 
+#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from datasets.commands.datasets_cli import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/env-llmeval/bin/evaluate-cli b/env-llmeval/bin/evaluate-cli new file mode 100644 index 0000000000000000000000000000000000000000..d8ceed6c483b8b204b0c878e858bd80830a1fdf4 --- /dev/null +++ b/env-llmeval/bin/evaluate-cli @@ -0,0 +1,8 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from evaluate.commands.evaluate_cli import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/env-llmeval/bin/get_gprof b/env-llmeval/bin/get_gprof new file mode 100644 index 0000000000000000000000000000000000000000..1c93d4192d8f0f44ddf90136e73490034de4da38 --- /dev/null +++ b/env-llmeval/bin/get_gprof @@ -0,0 +1,75 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +''' +build profile graph for the given instance + +running: + $ get_gprof + +executes: + gprof2dot -f pstats .prof | dot -Tpng -o .call.png + +where: + are arguments for gprof2dot, such as "-n 5 -e 5" + is code to create the instance to profile + is the class of the instance (i.e. 
type(instance)) + +For example: + $ get_gprof -n 5 -e 1 "import numpy; numpy.array([1,2])" + +will create 'ndarray.call.png' with the profile graph for numpy.array([1,2]), +where '-n 5' eliminates nodes below 5% threshold, similarly '-e 1' eliminates +edges below 1% threshold +''' + +if __name__ == "__main__": + import sys + if len(sys.argv) < 2: + print ("Please provide an object instance (e.g. 'import math; math.pi')") + sys.exit() + # grab args for gprof2dot + args = sys.argv[1:-1] + args = ' '.join(args) + # last arg builds the object + obj = sys.argv[-1] + obj = obj.split(';') + # multi-line prep for generating an instance + for line in obj[:-1]: + exec(line) + # one-line generation of an instance + try: + obj = eval(obj[-1]) + except Exception: + print ("Error processing object instance") + sys.exit() + + # get object 'name' + objtype = type(obj) + name = getattr(objtype, '__name__', getattr(objtype, '__class__', objtype)) + + # profile dumping an object + import dill + import os + import cProfile + #name = os.path.splitext(os.path.basename(__file__))[0] + cProfile.run("dill.dumps(obj)", filename="%s.prof" % name) + msg = "gprof2dot -f pstats %s %s.prof | dot -Tpng -o %s.call.png" % (args, name, name) + try: + res = os.system(msg) + except Exception: + print ("Please verify install of 'gprof2dot' to view profile graphs") + if res: + print ("Please verify install of 'gprof2dot' to view profile graphs") + + # get stats + f_prof = "%s.prof" % name + import pstats + stats = pstats.Stats(f_prof, stream=sys.stdout) + stats.strip_dirs().sort_stats('cumtime') + stats.print_stats(20) #XXX: save to file instead of print top 20? 
+ os.remove(f_prof) diff --git a/env-llmeval/bin/get_objgraph b/env-llmeval/bin/get_objgraph new file mode 100644 index 0000000000000000000000000000000000000000..5566a8fd953819e6c10050e52d0fa5ac1b4d1aa6 --- /dev/null +++ b/env-llmeval/bin/get_objgraph @@ -0,0 +1,54 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +display the reference paths for objects in ``dill.types`` or a .pkl file + +Notes: + the generated image is useful in showing the pointer references in + objects that are or can be pickled. Any object in ``dill.objects`` + listed in ``dill.load_types(picklable=True, unpicklable=True)`` works. + +Examples:: + + $ get_objgraph ArrayType + Image generated as ArrayType.png +""" + +import dill as pickle +#pickle.debug.trace(True) +#import pickle + +# get all objects for testing +from dill import load_types +load_types(pickleable=True,unpickleable=True) +from dill import objects + +if __name__ == "__main__": + import sys + if len(sys.argv) != 2: + print ("Please provide exactly one file or type name (e.g. 
'IntType')") + msg = "\n" + for objtype in list(objects.keys())[:40]: + msg += objtype + ', ' + print (msg + "...") + else: + objtype = str(sys.argv[-1]) + try: + obj = objects[objtype] + except KeyError: + obj = pickle.load(open(objtype,'rb')) + import os + objtype = os.path.splitext(objtype)[0] + try: + import objgraph + objgraph.show_refs(obj, filename=objtype+'.png') + except ImportError: + print ("Please install 'objgraph' to view object graphs") + + +# EOF diff --git a/env-llmeval/bin/huggingface-cli b/env-llmeval/bin/huggingface-cli new file mode 100644 index 0000000000000000000000000000000000000000..8b39c7be5438444bcc0537f6d9df1daaa35355cf --- /dev/null +++ b/env-llmeval/bin/huggingface-cli @@ -0,0 +1,8 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from huggingface_hub.commands.huggingface_cli import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/env-llmeval/bin/isympy b/env-llmeval/bin/isympy new file mode 100644 index 0000000000000000000000000000000000000000..d657daa36d41a82773a341aa529a3ca66e1720a9 --- /dev/null +++ b/env-llmeval/bin/isympy @@ -0,0 +1,8 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from isympy import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/env-llmeval/bin/lm-eval b/env-llmeval/bin/lm-eval new file mode 100644 index 0000000000000000000000000000000000000000..bad9ce1b40370dc69809faf9ebaa091f1ba80d3c --- /dev/null +++ b/env-llmeval/bin/lm-eval @@ -0,0 +1,8 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from lm_eval.__main__ import cli_evaluate +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(cli_evaluate()) diff --git a/env-llmeval/bin/lm_eval 
b/env-llmeval/bin/lm_eval new file mode 100644 index 0000000000000000000000000000000000000000..bad9ce1b40370dc69809faf9ebaa091f1ba80d3c --- /dev/null +++ b/env-llmeval/bin/lm_eval @@ -0,0 +1,8 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from lm_eval.__main__ import cli_evaluate +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(cli_evaluate()) diff --git a/env-llmeval/bin/nltk b/env-llmeval/bin/nltk new file mode 100644 index 0000000000000000000000000000000000000000..ca8a3e270838f8535c630ab62c488f5e2ad85552 --- /dev/null +++ b/env-llmeval/bin/nltk @@ -0,0 +1,8 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from nltk.cli import cli +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(cli()) diff --git a/env-llmeval/bin/normalizer b/env-llmeval/bin/normalizer new file mode 100644 index 0000000000000000000000000000000000000000..36e8ac8699930f3fdbc900e95c149cbfdf2e61b6 --- /dev/null +++ b/env-llmeval/bin/normalizer @@ -0,0 +1,8 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from charset_normalizer.cli import cli_detect +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(cli_detect()) diff --git a/env-llmeval/bin/pip b/env-llmeval/bin/pip new file mode 100644 index 0000000000000000000000000000000000000000..67eaaed1df5cc3b1b09211586a30ecce426f34f6 --- /dev/null +++ b/env-llmeval/bin/pip @@ -0,0 +1,8 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from pip._internal.cli.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/env-llmeval/bin/pip3 b/env-llmeval/bin/pip3 new file mode 100644 index 
0000000000000000000000000000000000000000..67eaaed1df5cc3b1b09211586a30ecce426f34f6 --- /dev/null +++ b/env-llmeval/bin/pip3 @@ -0,0 +1,8 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from pip._internal.cli.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/env-llmeval/bin/pip3.10 b/env-llmeval/bin/pip3.10 new file mode 100644 index 0000000000000000000000000000000000000000..67eaaed1df5cc3b1b09211586a30ecce426f34f6 --- /dev/null +++ b/env-llmeval/bin/pip3.10 @@ -0,0 +1,8 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from pip._internal.cli.main import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/env-llmeval/bin/pybind11-config b/env-llmeval/bin/pybind11-config new file mode 100644 index 0000000000000000000000000000000000000000..27410008c1d7b8a1741d1930907156a92f1cffa1 --- /dev/null +++ b/env-llmeval/bin/pybind11-config @@ -0,0 +1,8 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from pybind11.__main__ import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/env-llmeval/bin/sacrebleu b/env-llmeval/bin/sacrebleu new file mode 100644 index 0000000000000000000000000000000000000000..c72389b05bc29773dca066018483285b66ad4131 --- /dev/null +++ b/env-llmeval/bin/sacrebleu @@ -0,0 +1,8 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from sacrebleu.sacrebleu import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/env-llmeval/bin/tabulate b/env-llmeval/bin/tabulate new file mode 100644 index 
0000000000000000000000000000000000000000..d44e74a6dd5cb7eefe32d2e67f38a0cc8b035dc8 --- /dev/null +++ b/env-llmeval/bin/tabulate @@ -0,0 +1,8 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from tabulate import _main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(_main()) diff --git a/env-llmeval/bin/torchrun b/env-llmeval/bin/torchrun new file mode 100644 index 0000000000000000000000000000000000000000..c76790d01513a9e06a76b82524eb007fdea0c21c --- /dev/null +++ b/env-llmeval/bin/torchrun @@ -0,0 +1,8 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from torch.distributed.run import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/env-llmeval/bin/tqdm b/env-llmeval/bin/tqdm new file mode 100644 index 0000000000000000000000000000000000000000..18f92b8ff71a3bdcfd33c4ce3cfad9e0017e830e --- /dev/null +++ b/env-llmeval/bin/tqdm @@ -0,0 +1,8 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from tqdm.cli import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/env-llmeval/bin/transformers-cli b/env-llmeval/bin/transformers-cli new file mode 100644 index 0000000000000000000000000000000000000000..8a20b687477f148a8e3892592fdc33348c4791d3 --- /dev/null +++ b/env-llmeval/bin/transformers-cli @@ -0,0 +1,8 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from transformers.commands.transformers_cli import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/env-llmeval/bin/undill b/env-llmeval/bin/undill new file mode 100644 index 
0000000000000000000000000000000000000000..c472f78975d718bd92936aac2531db5e5c7ed64b --- /dev/null +++ b/env-llmeval/bin/undill @@ -0,0 +1,22 @@ +#!/home/sdp/llm_eval/env-llmeval/bin/python3 +# +# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) +# Copyright (c) 2008-2016 California Institute of Technology. +# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. +# License: 3-clause BSD. The full license text is available at: +# - https://github.com/uqfoundation/dill/blob/master/LICENSE +""" +unpickle the contents of a pickled object file + +Examples:: + + $ undill hello.pkl + ['hello', 'world'] +""" + +if __name__ == '__main__': + import sys + import dill + for file in sys.argv[1:]: + print (dill.load(open(file,'rb'))) + diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/__init__.py b/env-llmeval/lib/python3.10/site-packages/accelerate/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2d11498f2bf1000659376558406b4a1cc456761f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/accelerate/__init__.py @@ -0,0 +1,48 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+__version__ = "0.29.2" + +from .accelerator import Accelerator +from .big_modeling import ( + cpu_offload, + cpu_offload_with_hook, + disk_offload, + dispatch_model, + init_empty_weights, + init_on_device, + load_checkpoint_and_dispatch, +) +from .data_loader import skip_first_batches +from .inference import prepare_pippy +from .launchers import debug_launcher, notebook_launcher +from .state import PartialState +from .utils import ( + AutocastKwargs, + DataLoaderConfiguration, + DeepSpeedPlugin, + DistributedDataParallelKwargs, + DistributedType, + FullyShardedDataParallelPlugin, + GradScalerKwargs, + InitProcessGroupKwargs, + find_executable_batch_size, + infer_auto_device_map, + is_rich_available, + load_checkpoint_in_model, + synchronize_rng_states, +) + + +if is_rich_available(): + from .utils import rich diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/checkpointing.py b/env-llmeval/lib/python3.10/site-packages/accelerate/checkpointing.py new file mode 100644 index 0000000000000000000000000000000000000000..307eca49d7c93dcbf450c3de5f5f356e11db1d51 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/accelerate/checkpointing.py @@ -0,0 +1,275 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import random +from pathlib import Path +from typing import List + +import numpy as np +import torch +from safetensors.torch import load_file +from torch.cuda.amp import GradScaler + +from .utils import ( + MODEL_NAME, + OPTIMIZER_NAME, + RNG_STATE_NAME, + SAFE_MODEL_NAME, + SAFE_WEIGHTS_NAME, + SAMPLER_NAME, + SCALER_NAME, + SCHEDULER_NAME, + WEIGHTS_NAME, + get_pretty_name, + is_torch_xla_available, + is_xpu_available, + save, +) + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + +from .logging import get_logger +from .state import PartialState + + +logger = get_logger(__name__) + + +def save_accelerator_state( + output_dir: str, + model_states: List[dict], + optimizers: list, + schedulers: list, + dataloaders: list, + process_index: int, + scaler: GradScaler = None, + save_on_each_node: bool = False, + safe_serialization: bool = True, +): + """ + Saves the current states of the models, optimizers, scaler, and RNG generators to a given directory. + + + + If `safe_serialization` is `True`, models will be saved with `safetensors` while the rest are saved using native + `pickle`. + + + + Args: + output_dir (`str` or `os.PathLike`): + The name of the folder to save all relevant weights and states. + model_states (`List[torch.nn.Module]`): + A list of model states + optimizers (`List[torch.optim.Optimizer]`): + A list of optimizer instances + schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`): + A list of learning rate schedulers + dataloaders (`List[torch.utils.data.DataLoader]`): + A list of dataloader instances to save their sampler states + process_index (`int`): + The current process index in the Accelerator state + scaler (`torch.cuda.amp.GradScaler`, *optional*): + An optional gradient scaler instance to save + save_on_each_node (`bool`, *optional*): + Whether to save on every node, or only the main node. 
+ safe_serialization (`bool`, *optional*, defaults to `True`): + Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + """ + output_dir = Path(output_dir) + # Model states + for i, state in enumerate(model_states): + weights_name = WEIGHTS_NAME if not safe_serialization else SAFE_WEIGHTS_NAME + if i > 0: + weights_name = weights_name.replace(".", f"_{i}.") + output_model_file = output_dir.joinpath(weights_name) + save(state, output_model_file, save_on_each_node=save_on_each_node, safe_serialization=safe_serialization) + logger.info(f"Model weights saved in {output_model_file}") + # Optimizer states + for i, opt in enumerate(optimizers): + state = opt.state_dict() + optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin" + output_optimizer_file = output_dir.joinpath(optimizer_name) + save(state, output_optimizer_file, save_on_each_node=save_on_each_node, safe_serialization=False) + logger.info(f"Optimizer state saved in {output_optimizer_file}") + # Scheduler states + for i, scheduler in enumerate(schedulers): + state = scheduler.state_dict() + scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin" + output_scheduler_file = output_dir.joinpath(scheduler_name) + save(state, output_scheduler_file, save_on_each_node=save_on_each_node, safe_serialization=False) + logger.info(f"Scheduler state saved in {output_scheduler_file}") + # DataLoader states + for i, dataloader in enumerate(dataloaders): + sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin" + output_sampler_file = output_dir.joinpath(sampler_name) + # Only save if we have our custom sampler + from .data_loader import IterableDatasetShard, SeedableRandomSampler + + if isinstance(dataloader.dataset, IterableDatasetShard): + sampler = dataloader.sampler.sampler + + if isinstance(sampler, SeedableRandomSampler): + save(sampler, output_sampler_file, save_on_each_node=save_on_each_node, 
safe_serialization=False) + logger.info(f"Sampler state for dataloader {i} saved in {output_sampler_file}") + + # GradScaler state + if scaler is not None: + state = scaler.state_dict() + output_scaler_file = output_dir.joinpath(SCALER_NAME) + torch.save(state, output_scaler_file) + logger.info(f"Gradient scaler state saved in {output_scaler_file}") + # Random number generator states + states = {} + states_name = f"{RNG_STATE_NAME}_{process_index}.pkl" + states["random_state"] = random.getstate() + states["numpy_random_seed"] = np.random.get_state() + states["torch_manual_seed"] = torch.get_rng_state() + if is_xpu_available(): + states["torch_xpu_manual_seed"] = torch.xpu.get_rng_state_all() + else: + states["torch_cuda_manual_seed"] = torch.cuda.get_rng_state_all() + if is_torch_xla_available(): + states["xm_seed"] = xm.get_rng_state() + output_states_file = output_dir.joinpath(states_name) + torch.save(states, output_states_file) + logger.info(f"Random states saved in {output_states_file}") + return output_dir + + +def load_accelerator_state( + input_dir, + models, + optimizers, + schedulers, + dataloaders, + process_index, + scaler=None, + map_location=None, + **load_model_func_kwargs, +): + """ + Loads states of the models, optimizers, scaler, and RNG generators from a given directory. + + Args: + input_dir (`str` or `os.PathLike`): + The name of the folder to load all relevant weights and states. + models (`List[torch.nn.Module]`): + A list of model instances + optimizers (`List[torch.optim.Optimizer]`): + A list of optimizer instances + schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`): + A list of learning rate schedulers + process_index (`int`): + The current process index in the Accelerator state + scaler (`torch.cuda.amp.GradScaler`, *optional*): + An optional *GradScaler* instance to load + map_location (`str`, *optional*): + What device to load the optimizer state onto. Should be one of either "cpu" or "on_device". 
+ load_model_func_kwargs (`dict`, *optional*): + Additional arguments that can be passed to the model's `load_state_dict` method. + """ + if map_location not in [None, "cpu", "on_device"]: + raise TypeError( + "Unsupported optimizer map location passed, please choose one of `None`, `'cpu'`, or `'on_device'`" + ) + if map_location is None: + map_location = "cpu" + elif map_location == "on_device": + map_location = PartialState().device + + input_dir = Path(input_dir) + # Model states + for i, model in enumerate(models): + ending = f"_{i}" if i > 0 else "" + input_model_file = input_dir.joinpath(f"{SAFE_MODEL_NAME}{ending}.safetensors") + if input_model_file.exists(): + state_dict = load_file(input_model_file, device=str(map_location)) + else: + # Load with torch + input_model_file = input_dir.joinpath(f"{MODEL_NAME}{ending}.bin") + state_dict = torch.load(input_model_file, map_location=map_location) + models[i].load_state_dict(state_dict, **load_model_func_kwargs) + logger.info("All model weights loaded successfully") + + # Optimizer states + for i, opt in enumerate(optimizers): + optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin" + input_optimizer_file = input_dir.joinpath(optimizer_name) + optimizer_state = torch.load(input_optimizer_file, map_location=map_location) + optimizers[i].load_state_dict(optimizer_state) + logger.info("All optimizer states loaded successfully") + + # Scheduler states + for i, scheduler in enumerate(schedulers): + scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin" + input_scheduler_file = input_dir.joinpath(scheduler_name) + scheduler.load_state_dict(torch.load(input_scheduler_file)) + logger.info("All scheduler states loaded successfully") + + for i, dataloader in enumerate(dataloaders): + sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin" + input_sampler_file = input_dir.joinpath(sampler_name) + # Only load if we have our custom sampler + from 
.data_loader import IterableDatasetShard, SeedableRandomSampler + + if isinstance(dataloader.dataset, IterableDatasetShard): + sampler = dataloader.sampler.sampler + + if isinstance(sampler, SeedableRandomSampler): + dataloader.sampler.sampler = torch.load(input_sampler_file) + logger.info("All dataloader sampler states loaded successfully") + + # GradScaler state + if scaler is not None: + input_scaler_file = input_dir.joinpath(SCALER_NAME) + scaler.load_state_dict(torch.load(input_scaler_file)) + logger.info("GradScaler state loaded successfully") + + # Random states + try: + states = torch.load(input_dir.joinpath(f"{RNG_STATE_NAME}_{process_index}.pkl")) + random.setstate(states["random_state"]) + np.random.set_state(states["numpy_random_seed"]) + torch.set_rng_state(states["torch_manual_seed"]) + if is_xpu_available(): + torch.xpu.set_rng_state_all(states["torch_xpu_manual_seed"]) + else: + torch.cuda.set_rng_state_all(states["torch_cuda_manual_seed"]) + if is_torch_xla_available(): + xm.set_rng_state(states["xm_seed"]) + logger.info("All random states loaded successfully") + except Exception: + logger.info("Could not load random states") + + +def save_custom_state(obj, path, index: int = 0, save_on_each_node: bool = False): + """ + Saves the state of `obj` to `{path}/custom_checkpoint_{index}.pkl` + """ + # Should this be the right way to get a qual_name type value from `obj`? 
+ save_location = Path(path) / f"custom_checkpoint_{index}.pkl" + logger.info(f"Saving the state of {get_pretty_name(obj)} to {save_location}") + save(obj.state_dict(), save_location, save_on_each_node=save_on_each_node) + + +def load_custom_state(obj, path, index: int = 0): + """ + Loads the state of `obj` at `{path}/custom_checkpoint_{index}.pkl` + """ + load_location = f"{path}/custom_checkpoint_{index}.pkl" + logger.info(f"Loading the state of {get_pretty_name(obj)} from {load_location}") + obj.load_state_dict(torch.load(load_location, map_location="cpu")) diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/launchers.py b/env-llmeval/lib/python3.10/site-packages/accelerate/launchers.py new file mode 100644 index 0000000000000000000000000000000000000000..0265b25187f813356cfb49768097d6cf2599b0d3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/accelerate/launchers.py @@ -0,0 +1,258 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +import tempfile + +import torch + +from .state import AcceleratorState, PartialState +from .utils import ( + PrecisionType, + PrepareForLaunch, + are_libraries_initialized, + check_cuda_p2p_ib_support, + get_gpu_info, + is_mps_available, + patch_environment, +) + + +def test_launch(): + "Verify a `PartialState` can be initialized." 
+ _ = PartialState() + + +def notebook_launcher( + function, + args=(), + num_processes=None, + mixed_precision="no", + use_port="29500", + master_addr="127.0.0.1", + node_rank=0, + num_nodes=1, +): + """ + Launches a training function, using several processes or multiple nodes if it's possible in the current environment + (TPU with multiple cores for instance). + + + + To use this function absolutely zero calls to a CUDA device must be made in the notebook session before calling. If + any have been made, you will need to restart the notebook and make sure no cells use any CUDA capability. + + Setting `ACCELERATE_DEBUG_MODE="1"` in your environment will run a test before truly launching to ensure that none + of those calls have been made. + + + + Args: + function (`Callable`): + The training function to execute. If it accepts arguments, the first argument should be the index of the + process run. + args (`Tuple`): + Tuple of arguments to pass to the function (it will receive `*args`). + num_processes (`int`, *optional*): + The number of processes to use for training. Will default to 8 in Colab/Kaggle if a TPU is available, to + the number of GPUs available otherwise. + mixed_precision (`str`, *optional*, defaults to `"no"`): + If `fp16` or `bf16`, will use mixed precision training on multi-GPU. + use_port (`str`, *optional*, defaults to `"29500"`): + The port to use to communicate between processes when launching a multi-GPU training. + master_addr (`str`, *optional*, defaults to `"127.0.0.1"`): + The address to use for communication between processes. + node_rank (`int`, *optional*, defaults to 0): + The rank of the current node. + num_nodes (`int`, *optional*, defaults to 1): + The number of nodes to use for training. + + Example: + + ```python + # Assume this is defined in a Jupyter Notebook on an instance with two GPUs + from accelerate import notebook_launcher + + + def train(*args): + # Your training function here + ... 
+ + + notebook_launcher(train, args=(arg1, arg2), num_processes=2, mixed_precision="fp16") + ``` + """ + # Are we in a google colab or a Kaggle Kernel? + in_colab = False + in_kaggle = False + if any(key.startswith("KAGGLE") for key in os.environ.keys()): + in_kaggle = True + elif "IPython" in sys.modules: + in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython()) + + try: + mixed_precision = PrecisionType(mixed_precision.lower()) + except ValueError: + raise ValueError( + f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." + ) + + if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None): + # TPU launch + import torch_xla.distributed.xla_multiprocessing as xmp + + if len(AcceleratorState._shared_state) > 0: + raise ValueError( + "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside " + "your training function. Restart your notebook and make sure no cells initializes an " + "`Accelerator`." + ) + if num_processes is None: + num_processes = 8 + + launcher = PrepareForLaunch(function, distributed_type="TPU") + print(f"Launching a training on {num_processes} TPU cores.") + xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork") + elif in_colab and get_gpu_info()[1] < 2: + # No need for a distributed launch otherwise as it's either CPU or one GPU. + if torch.cuda.is_available(): + print("Launching training on one GPU.") + else: + print("Launching training on one CPU.") + function(*args) + else: + if num_processes is None: + raise ValueError( + "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." 
+ ) + if node_rank >= num_nodes: + raise ValueError("The node_rank must be less than the number of nodes.") + if num_processes > 1: + # Multi-GPU launch + from torch.multiprocessing import start_processes + from torch.multiprocessing.spawn import ProcessRaisedException + + if len(AcceleratorState._shared_state) > 0: + raise ValueError( + "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized " + "inside your training function. Restart your notebook and make sure no cells initializes an " + "`Accelerator`." + ) + # Check for specific libraries known to initialize CUDA that users constantly use + problematic_imports = are_libraries_initialized("bitsandbytes") + if len(problematic_imports) > 0: + err = ( + "Could not start distributed process. Libraries known to initialize CUDA upon import have been " + "imported already. Please keep these imports inside your training function to try and help with this:" + ) + for lib_name in problematic_imports: + err += f"\n\t* `{lib_name}`" + raise RuntimeError(err) + + patched_env = dict( + nproc=num_processes, + node_rank=node_rank, + world_size=num_nodes * num_processes, + master_addr=master_addr, + master_port=use_port, + mixed_precision=mixed_precision, + ) + + # Check for CUDA P2P and IB issues + if not check_cuda_p2p_ib_support(): + patched_env["nccl_p2p_disable"] = "1" + patched_env["nccl_ib_disable"] = "1" + + # torch.distributed will expect a few environment variable to be here. We set the ones common to each + # process here (the other ones will be set be the launcher). 
+ with patch_environment(**patched_env): + # First dummy launch + if os.environ.get("ACCELERATE_DEBUG_MODE", "false").lower() == "true": + launcher = PrepareForLaunch(test_launch, distributed_type="MULTI_GPU") + try: + start_processes(launcher, args=(), nprocs=num_processes, start_method="fork") + except ProcessRaisedException as e: + err = "An issue was found when verifying a stable environment for the notebook launcher." + if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: + raise RuntimeError( + f"{err}" + "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. " + "Please review your imports and test them when running the `notebook_launcher()` to identify " + "which one is problematic and causing CUDA to be initialized." + ) from e + else: + raise RuntimeError(f"{err} The following error was raised: {e}") from e + # Now the actual launch + launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU") + print(f"Launching training on {num_processes} GPUs.") + try: + start_processes(launcher, args=args, nprocs=num_processes, start_method="fork") + except ProcessRaisedException as e: + if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: + raise RuntimeError( + "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. " + "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. " + "Please review your imports and test them when running the `notebook_launcher()` to identify " + "which one is problematic and causing CUDA to be initialized." + ) from e + else: + raise RuntimeError(f"An issue was found when launching the training: {e}") from e + + else: + # No need for a distributed launch otherwise as it's either CPU, GPU or MPS. 
+ if is_mps_available(): + os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" + print("Launching training on MPS.") + elif torch.cuda.is_available(): + print("Launching training on one GPU.") + else: + print("Launching training on CPU.") + function(*args) + + +def debug_launcher(function, args=(), num_processes=2): + """ + Launches a training function using several processes on CPU for debugging purposes. + + + + This function is provided for internal testing and debugging, but it's not intended for real trainings. It will + only use the CPU. + + + + Args: + function (`Callable`): + The training function to execute. + args (`Tuple`): + Tuple of arguments to pass to the function (it will receive `*args`). + num_processes (`int`, *optional*, defaults to 2): + The number of processes to use for training. + """ + from torch.multiprocessing import start_processes + + with tempfile.NamedTemporaryFile() as tmp_file: + # torch.distributed will expect a few environment variable to be here. We set the ones common to each + # process here (the other ones will be set be the launcher). + with patch_environment( + world_size=num_processes, + master_addr="127.0.0.1", + master_port="29500", + accelerate_mixed_precision="no", + accelerate_debug_rdv_file=tmp_file.name, + accelerate_use_cpu="yes", + ): + launcher = PrepareForLaunch(function, debug=True) + start_processes(launcher, args=args, nprocs=num_processes, start_method="fork") diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/local_sgd.py b/env-llmeval/lib/python3.10/site-packages/accelerate/local_sgd.py new file mode 100644 index 0000000000000000000000000000000000000000..7f2657fcc8b057b4396cf299e6cf681fa7b83aa8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/accelerate/local_sgd.py @@ -0,0 +1,102 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import torch + +from accelerate import Accelerator, DistributedType + + +class LocalSGD: + """ + A helper class to support local SGD on top of Accelerator. It simply runs a given number of updates independently + on each device, and averages model weights every K synchronization step. + + It should be used only in the multi-GPU (or multi-CPU) setup without extensions such as DeepSpeed. In particular, + this is a simple implementation that cannot support scenarios such as model parallelism. + + + Although we are not aware of the true origins of this simple approach, the idea of local SGD is quite old and goes + back to at least: + + Zhang, J., De Sa, C., Mitliagkas, I., & Ré, C. (2016). [Parallel SGD: When does averaging help?. arXiv preprint + arXiv:1606.07365.](https://arxiv.org/abs/1606.07365) + + We credit the term Local SGD to the following paper (but there might be earlier references we are not aware of). + + Stich, Sebastian Urban. ["Local SGD Converges Fast and Communicates Little." ICLR 2019-International Conference on + Learning Representations. No. CONF. 2019.](https://arxiv.org/abs/1805.09767) + + """ + + def __enter__(self): + if self.enabled: + self.model_sync_obj = self.model.no_sync() + self.model_sync_obj.__enter__() + + return self + + def __exit__(self, type, value, tb): + if self.enabled: + # Average all models on exit + self._sync_and_avg_model_params() + self.model_sync_obj.__exit__(type, value, tb) + + def __init__(self, accelerator: Accelerator, model: torch.nn.Module, local_sgd_steps: int, enabled: bool = True): + """ + Constructor. 
+ + Args: + model (`torch.nn.Module): + The model whose parameters we need to average. + accelerator (`Accelerator`): + Accelerator object. + local_sgd_steps (`int`): + A number of local SGD steps (before model parameters are synchronized). + enabled (`bool): + Local SGD is disabled if this parameter set to `False`. + """ + if accelerator.distributed_type not in [ + DistributedType.NO, + DistributedType.MULTI_CPU, + DistributedType.MULTI_GPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_NPU, + ]: + raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)") + self.enabled = enabled and accelerator.distributed_type != DistributedType.NO + self.num_steps = 0 + if self.enabled: + self.accelerator = accelerator + self.model = model + self.local_sgd_steps = local_sgd_steps + + def step(self): + """ + This function makes a "step" and synchronizes model parameters if necessary. + """ + self.num_steps += 1 + if not self.enabled: + return + + if self.num_steps % self.local_sgd_steps == 0: + self._sync_and_avg_model_params() + + def _sync_and_avg_model_params(self): + """ + Synchronize + Average model parameters across all GPUs + """ + + self.accelerator.wait_for_everyone() + with self.accelerator.autocast(): + for param in self.model.parameters(): + param.data = self.accelerator.reduce(param.data, reduction="mean") diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/logging.py b/env-llmeval/lib/python3.10/site-packages/accelerate/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..ebb8c1eb830e54e3f2870cb3a84afd33b7631ea6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/accelerate/logging.py @@ -0,0 +1,123 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import functools +import logging +import os + +from .state import PartialState + + +class MultiProcessAdapter(logging.LoggerAdapter): + """ + An adapter to assist with logging in multiprocess. + + `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all processes + or only the main executed one. Default is `main_process_only=True`. + + Does not require an `Accelerator` object to be created first. + """ + + @staticmethod + def _should_log(main_process_only): + "Check if log should be performed" + state = PartialState() + return not main_process_only or (main_process_only and state.is_main_process) + + def log(self, level, msg, *args, **kwargs): + """ + Delegates logger call after checking if we should log. + + Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes + or only the main executed one. Default is `True` if not passed + + Also accepts "in_order", which if `True` makes the processes log one by one, in order. This is much easier to + read, but comes at the cost of sometimes needing to wait for the other processes. Default is `False` to not + break with the previous behavior. + + `in_order` is ignored if `main_process_only` is passed. + """ + if PartialState._shared_state == {}: + raise RuntimeError( + "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." 
+ ) + main_process_only = kwargs.pop("main_process_only", True) + in_order = kwargs.pop("in_order", False) + + if self.isEnabledFor(level): + if self._should_log(main_process_only): + msg, kwargs = self.process(msg, kwargs) + self.logger.log(level, msg, *args, **kwargs) + + elif in_order: + state = PartialState() + for i in range(state.num_processes): + if i == state.process_index: + msg, kwargs = self.process(msg, kwargs) + self.logger.log(level, msg, *args, **kwargs) + state.wait_for_everyone() + + @functools.lru_cache(None) + def warning_once(self, *args, **kwargs): + """ + This method is identical to `logger.warning()`, but will emit the warning with the same message only once + + Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the + cache. The assumption here is that all warning messages are unique across the code. If they aren't then need to + switch to another type of cache that includes the caller frame information in the hashing function. + """ + self.warning(*args, **kwargs) + + +def get_logger(name: str, log_level: str = None): + """ + Returns a `logging.Logger` for `name` that can handle multiprocessing. + + If a log should be called on all processes, pass `main_process_only=False` If a log should be called on all + processes and in order, also pass `in_order=True` + + Args: + name (`str`): + The name for the logger, such as `__file__` + log_level (`str`, *optional*): + The log level to use. 
If not passed, will default to the `LOG_LEVEL` environment variable, or `INFO` if not + + Example: + + ```python + >>> from accelerate.logging import get_logger + >>> from accelerate import Accelerator + + >>> logger = get_logger(__name__) + + >>> accelerator = Accelerator() + >>> logger.info("My log", main_process_only=False) + >>> logger.debug("My log", main_process_only=True) + + >>> logger = get_logger(__name__, log_level="DEBUG") + >>> logger.info("My log") + >>> logger.debug("My second log") + + >>> array = ["a", "b", "c", "d"] + >>> letter_at_rank = array[accelerator.process_index] + >>> logger.info(letter_at_rank, in_order=True) + ``` + """ + if log_level is None: + log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None) + logger = logging.getLogger(name) + if log_level is not None: + logger.setLevel(log_level.upper()) + logger.root.setLevel(log_level.upper()) + return MultiProcessAdapter(logger, {}) diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/optimizer.py b/env-llmeval/lib/python3.10/site-packages/accelerate/optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..c2fc3e9f1b7592b29ed18ce1ce78a0859286f438 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/accelerate/optimizer.py @@ -0,0 +1,193 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +import warnings + +import torch + +from .state import AcceleratorState, GradientState +from .utils import DistributedType, honor_type, is_torch_xla_available + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + + +def move_to_device(state, device): + if isinstance(state, (list, tuple)): + return honor_type(state, (move_to_device(t, device) for t in state)) + elif isinstance(state, dict): + return type(state)({k: move_to_device(v, device) for k, v in state.items()}) + elif isinstance(state, torch.Tensor): + return state.to(device) + return state + + +class AcceleratedOptimizer(torch.optim.Optimizer): + """ + Internal wrapper around a torch optimizer. + + Conditionally will perform `step` and `zero_grad` if gradients should be synchronized when performing gradient + accumulation. + + Args: + optimizer (`torch.optim.optimizer.Optimizer`): + The optimizer to wrap. + device_placement (`bool`, *optional*, defaults to `True`): + Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of + `optimizer` on the right device. + scaler (`torch.cuda.amp.grad_scaler.GradScaler`, *optional*): + The scaler to use in the step function if training with mixed precision. 
+ """ + + def __init__(self, optimizer, device_placement=True, scaler=None): + self.optimizer = optimizer + self.scaler = scaler + self.accelerator_state = AcceleratorState() + self.gradient_state = GradientState() + self.device_placement = device_placement + self._is_overflow = False + + if self.scaler is not None: + self._accelerate_step_called = False + self._optimizer_original_step_method = self.optimizer.step + self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step) + + # Handle device placement + if device_placement: + state_dict = self.optimizer.state_dict() + if self.accelerator_state.distributed_type == DistributedType.XLA: + xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device) + else: + state_dict = move_to_device(state_dict, self.accelerator_state.device) + self.optimizer.load_state_dict(state_dict) + + @property + def state(self): + return self.optimizer.state + + @state.setter + def state(self, state): + self.optimizer.state = state + + @property + def param_groups(self): + return self.optimizer.param_groups + + @param_groups.setter + def param_groups(self, param_groups): + self.optimizer.param_groups = param_groups + + @property + def defaults(self): + return self.optimizer.defaults + + @defaults.setter + def defaults(self, defaults): + self.optimizer.defaults = defaults + + def add_param_group(self, param_group): + self.optimizer.add_param_group(param_group) + + def load_state_dict(self, state_dict): + if self.accelerator_state.distributed_type == DistributedType.XLA and self.device_placement: + xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device) + self.optimizer.load_state_dict(state_dict) + + def state_dict(self): + return self.optimizer.state_dict() + + def zero_grad(self, set_to_none=None): + if self.gradient_state.sync_gradients: + accept_arg = "set_to_none" in inspect.signature(self.optimizer.zero_grad).parameters + if accept_arg: + if set_to_none is None: + set_to_none = True + 
self.optimizer.zero_grad(set_to_none=set_to_none) + else: + if set_to_none is not None: + raise ValueError("`set_to_none` for Optimizer.zero_grad` is not supported by this optimizer.") + self.optimizer.zero_grad() + + def step(self, closure=None): + if ( + not self.gradient_state.is_xla_gradients_synced + and self.accelerator_state.distributed_type == DistributedType.XLA + ): + gradients = xm._fetch_gradients(self.optimizer) + xm.all_reduce("sum", gradients, scale=1.0 / xm.xrt_world_size()) + self.gradient_state.is_xla_gradients_synced = True + if self.gradient_state.sync_gradients: + if self.scaler is not None: + self.optimizer.step = self._optimizer_patched_step_method + + self.scaler.step(self.optimizer, closure) + self.scaler.update() + + if not self._accelerate_step_called: + # If the optimizer step was skipped, gradient overflow was detected. + self._is_overflow = True + else: + self._is_overflow = False + # Reset the step method to the original one + self.optimizer.step = self._optimizer_original_step_method + # Reset the indicator + self._accelerate_step_called = False + else: + self.optimizer.step(closure) + if self.accelerator_state.distributed_type == DistributedType.XLA: + self.gradient_state.is_xla_gradients_synced = False + + def _switch_parameters(self, parameters_map): + for param_group in self.optimizer.param_groups: + param_group["params"] = [parameters_map.get(p, p) for p in param_group["params"]] + + @property + def is_overflow(self): + """Whether or not the optimizer step was done, or skipped because of gradient overflow.""" + warnings.warn( + "The `is_overflow` property is deprecated and will be removed in version 1.0 of Accelerate use " + "`optimizer.step_was_skipped` instead.", + FutureWarning, + ) + return self._is_overflow + + @property + def step_was_skipped(self): + """Whether or not the optimizer step was skipped.""" + return self._is_overflow + + def __getstate__(self): + _ignored_keys = [ + "_accelerate_step_called", + 
"_optimizer_original_step_method", + "_optimizer_patched_step_method", + ] + return {k: v for k, v in self.__dict__.items() if k not in _ignored_keys} + + def __setstate__(self, state): + self.__dict__.update(state) + if self.scaler is not None: + self._accelerate_step_called = False + self._optimizer_original_step_method = self.optimizer.step + self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step) + + +def patch_optimizer_step(accelerated_optimizer: AcceleratedOptimizer, method): + def patched_step(*args, **kwargs): + accelerated_optimizer._accelerate_step_called = True + return method(*args, **kwargs) + + return patched_step diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/scheduler.py b/env-llmeval/lib/python3.10/site-packages/accelerate/scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..1fa8a13f238afd7b908ee8e8cb8e0620f48d4ff8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/accelerate/scheduler.py @@ -0,0 +1,98 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation + +import warnings + +from .state import AcceleratorState, GradientState + + +warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler") + + +class AcceleratedScheduler: + """ + A wrapper around a learning rate scheduler that will only step when the optimizer(s) have a training step. Useful + to avoid making a scheduler step too fast when gradients went overflow and there was no training step (in mixed + precision training) + + When performing gradient accumulation scheduler lengths should not be changed accordingly, Accelerate will always + step the scheduler to account for it. + + Args: + scheduler (`torch.optim.lr_scheduler._LRScheduler`): + The scheduler to wrap. + optimizers (one or a list of `torch.optim.Optimizer`): + The optimizers used. + step_with_optimizer (`bool`, *optional*, defaults to `True`): + Whether or not the scheduler should be stepped at each optimizer step. + split_batches (`bool`, *optional*, defaults to `False`): + Whether or not the dataloaders split one batch across the different processes (so batch size is the same + regardless of the number of processes) or create batches on each process (so batch size is the original + batch size multiplied by the number of processes). + """ + + def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False): + self.scheduler = scheduler + self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers] + self.split_batches = split_batches + self.step_with_optimizer = step_with_optimizer + self.gradient_state = GradientState() + + def step(self, *args, **kwargs): + if not self.step_with_optimizer: + # No link between scheduler and optimizer -> just step + self.scheduler.step(*args, **kwargs) + return + + # Otherwise, first make sure the optimizer was stepped. 
+ if not self.gradient_state.sync_gradients: + if self.gradient_state.adjust_scheduler: + self.scheduler._step_count += 1 + return + + for opt in self.optimizers: + if opt.step_was_skipped: + return + if self.split_batches: + # Split batches -> the training dataloader batch size is not changed so one step per training step + self.scheduler.step(*args, **kwargs) + else: + # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do + # num_processes steps per training step + num_processes = AcceleratorState().num_processes + for _ in range(num_processes): + # Special case when using OneCycle and `drop_last` was not used + if hasattr(self.scheduler, "total_steps"): + if self.scheduler._step_count <= self.scheduler.total_steps: + self.scheduler.step(*args, **kwargs) + else: + self.scheduler.step(*args, **kwargs) + + # Passthroughs + def get_last_lr(self): + return self.scheduler.get_last_lr() + + def state_dict(self): + return self.scheduler.state_dict() + + def load_state_dict(self, state_dict): + self.scheduler.load_state_dict(state_dict) + + def get_lr(self): + return self.scheduler.get_lr() + + def print_lr(self, *args, **kwargs): + return self.scheduler.print_lr(*args, **kwargs) diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/state.py b/env-llmeval/lib/python3.10/site-packages/accelerate/state.py new file mode 100644 index 0000000000000000000000000000000000000000..90f2b057036b26bcd2f900ff490deda4f1370bf8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/accelerate/state.py @@ -0,0 +1,1202 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import logging +import math +import os +import threading +import warnings +from contextlib import contextmanager +from functools import partial +from typing import Any, Callable, Optional + +import torch + +from .utils import ( + DistributedType, + DynamoBackend, + GradientAccumulationPlugin, + check_cuda_p2p_ib_support, + check_fp8_capability, + get_ccl_version, + get_cpu_distributed_information, + get_int_from_env, + is_ccl_available, + is_datasets_available, + is_deepspeed_available, + is_fp8_available, + is_ipex_available, + is_mlu_available, + is_mps_available, + is_npu_available, + is_torch_xla_available, + is_xpu_available, + parse_choice_from_env, + parse_flag_from_env, + set_numa_affinity, +) +from .utils.dataclasses import SageMakerDistributedType + + +if is_torch_xla_available(): + import torch_xla.core.xla_model as xm + +if is_mlu_available(check_device=False): + import torch_mlu # noqa: F401 + +if is_npu_available(check_device=False): + import torch_npu # noqa: F401 + +logger = logging.getLogger(__name__) + + +def is_initialized() -> bool: + """ + Checks if the `AcceleratorState` has been initialized from `Accelerator`. Same as `AcceleratorState.initialized`, + but works as a module method. + """ + return AcceleratorState._shared_state != {} + + +# Lambda function that does nothing +def do_nothing(*args, **kwargs): + return None + + +class ThreadLocalSharedDict(threading.local): + """ + Descriptor that holds a dict shared between instances of a class in the same thread. 
+
+    Note: Descriptors have slightly different semantics than just a dict field on its own.
+    `PartialState(...)._shared_state` and `PartialState._shared_state` (instance vs class) give the same value: the
+    underlying _storage dict. Likewise, `PartialState(...)._shared_state = {...}` overrides the _storage dict inside
+    the descriptor as you would expect. However, `PartialState._shared_state = {}` actually replaces the descriptor
+    object with a dict instead. Thus, you should modify the _storage dict in-place (e.g. `_shared_state.clear()`).
+
+    See Python documentation for an explanation of descriptors: https://docs.python.org/3/howto/descriptor.html
+
+    This is required for using PyTorch/XLA with PJRT in multithreaded mode (required for TPU v2 and v3).
+
+    See https://github.com/pytorch/xla/blob/r2.0/docs/pjrt.md#multithreading-on-tpu-v2v3
+    """
+
+    def __init__(self, thread_local: bool = False):
+        self._storage = {}
+
+    def __get__(self, obj, objtype=None):
+        return self._storage
+
+    def __set__(self, obj, value):
+        self._storage = value
+
+
+# Prefer global shared dictionary, except when using TPU.
+SharedDict = dict if not is_torch_xla_available() else ThreadLocalSharedDict
+
+
+# Inspired by Alex Martelli's 'Borg'.
+class PartialState:
+    """
+    Singleton class that has information about the current training environment and functions to help with process
+    control. Designed to be used when only process control and device execution states are needed. Does *not* need to
+    be initialized from `Accelerator`.
+
+    Args:
+        cpu (`bool`, *optional*):
+            Whether or not to force the script to execute on CPU. Will ignore any accelerators available if set to
+            `True` and force the execution on the CPU.
+        kwargs (additional keyword arguments, *optional*):
+            Additional keyword arguments to pass to the relevant `init_process_group` function. Valid `kwargs` can be
+            found in [`utils.InitProcessGroupKwargs`]. See the example section for detailed usage.
+
+    **Available attributes:**
+
+    - **device** (`torch.device`) -- The device to use.
+    - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently
+      in use.
+    - **local_process_index** (`int`) -- The index of the current process on the current server.
+    - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type
+      of mixed precision being performed. (Choose from 'no', 'fp16', 'bf16' or 'fp8').
+    - **num_processes** (`int`) -- The number of processes currently launched in parallel.
+    - **process_index** (`int`) -- The index of the current process.
+    - **is_last_process** (`bool`) -- Whether or not the current process is the last one.
+    - **is_main_process** (`bool`) -- Whether or not the current process is the main one.
+    - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node.
+    - **debug** (`bool`) -- Whether or not the current script is being run in debug mode.
+ + Example: + ```python + from accelerate.utils import InitProcessGroupKwargs + + # To include `InitProcessGroupKwargs`, init then call `.to_kwargs()` + kwargs = InitProcessGroupKwargs(...).to_kwargs() + state = PartialState(**kwargs) + ``` + """ + + _shared_state = SharedDict() + _known_attrs = [ + "_cpu", + "_mixed_precision", + "_shared_state", + "backend", + "debug", + "device", + "distributed_type", + "fork_launched", + "local_process_index", + "num_processes", + "process_index", + ] + + def __init__(self, cpu: bool = False, **kwargs): + self.__dict__ = self._shared_state + if not self.initialized: + self._cpu = cpu + self.backend = None + env_device = os.environ.get("ACCELERATE_TORCH_DEVICE", None) + self.device = torch.device(env_device) if env_device is not None else None + self.debug = parse_flag_from_env("ACCELERATE_DEBUG_MODE") + use_sagemaker_dp = kwargs.pop("_use_sagemaker_dp", None) + dist_information = None + if use_sagemaker_dp is None: + use_sagemaker_dp = ( + os.environ.get("ACCELERATE_USE_SAGEMAKER", "false") == "true" + and os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") != SageMakerDistributedType.NO + ) + + # Sets up self.backend + imports + backend, distributed_type = self._prepare_backend(cpu, use_sagemaker_dp, kwargs.pop("backend", None)) + self.backend = backend + self.distributed_type = distributed_type + use_deepspeed = False + if not cpu and self.backend != "xla": + if int(os.environ.get("LOCAL_RANK", -1)) != -1: + # Deal with spawning deepspeed + if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true": + if not is_deepspeed_available(): + raise ImportError( + "DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source" + ) + from deepspeed import comm as dist + + if is_xpu_available() and is_ccl_available(): + os.environ["CCL_PROCESS_LAUNCHER"] = "none" + os.environ["CCL_LOCAL_SIZE"] = os.environ.get("LOCAL_WORLD_SIZE", "1") + os.environ["CCL_LOCAL_RANK"] = 
os.environ.get("LOCAL_RANK", "0") + + if not dist.is_initialized(): + dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs) + # We need to flag to `use_deepspeed` to be True to override `distributed_type` later + use_deepspeed = True + # Deal with all other backends but XPU and CPU, that gets handled special later + elif ( + self.distributed_type not in (DistributedType.MULTI_XPU, DistributedType.MULTI_CPU) + and not torch.distributed.is_initialized() + ): + torch.distributed.init_process_group(backend=self.backend, **kwargs) + # XPU and CPU require special env configs to be set + if self.distributed_type in (DistributedType.MULTI_XPU, DistributedType.MULTI_CPU): + dist_information = get_cpu_distributed_information() + os.environ["RANK"] = str(dist_information.rank) + os.environ["WORLD_SIZE"] = str(dist_information.world_size) + os.environ["LOCAL_RANK"] = str(dist_information.local_rank) + os.environ["LOCAL_WORLD_SIZE"] = str(dist_information.local_world_size) + if self.backend == "ccl" and self.distributed_type == DistributedType.MULTI_XPU: + os.environ["CCL_PROCESS_LAUNCHER"] = "none" + os.environ["CCL_LOCAL_SIZE"] = os.environ["LOCAL_WORLD_SIZE"] + os.environ["CCL_LOCAL_RANK"] = os.environ["LOCAL_RANK"] + if not os.environ.get("MASTER_PORT", None): + os.environ["MASTER_PORT"] = "29500" + if ( + not os.environ.get("MASTER_ADDR", None) + and dist_information.local_world_size != dist_information.world_size + and self.backend != "mpi" + ): + raise ValueError( + "Tried to launch on distributed with multinode, but `MASTER_ADDR` env was not set, " + "please try exporting rank 0's hostname as `MASTER_ADDR`" + ) + kwargs["rank"] = dist_information.rank + kwargs["world_size"] = dist_information.world_size + + if ( + self.distributed_type == DistributedType.MULTI_CPU + and get_int_from_env(["OMP_NUM_THREADS", "OMP_NUM_THREADS"], 0) > 0 + ): + import psutil + + num_cpu_threads_per_process = int( + psutil.cpu_count(logical=False) / 
dist_information.local_world_size + ) + if num_cpu_threads_per_process == 0: + num_cpu_threads_per_process = 1 + torch.set_num_threads(num_cpu_threads_per_process) + warnings.warn( + f"OMP_NUM_THREADS/MKL_NUM_THREADS unset, we set it at {num_cpu_threads_per_process} to improve oob" + " performance." + ) + + if not torch.distributed.is_initialized(): + torch.distributed.init_process_group(backend=self.backend, **kwargs) + + # No backend == no distributed training + if self.backend is None: + self.distributed_type = DistributedType.NO + self.num_processes = 1 + self.process_index = 0 + self.local_process_index = 0 + elif self.backend == "xla": + # XLA needs device setting first for `set_replication` + self.set_device() + xm.set_replication(self.device, xm.get_xla_supported_devices()) + self.num_processes = xm.xrt_world_size() + self.process_index = xm.get_ordinal() + if is_torch_xla_available(check_is_tpu=True): + self.local_process_index = xm.get_local_ordinal() + else: + self.local_process_index = int(os.environ.get("LOCAL_RANK", -1)) + else: + self.num_processes = torch.distributed.get_world_size() + self.process_index = torch.distributed.get_rank() + self.local_process_index = ( + int(os.environ.get("LOCAL_RANK", -1)) if dist_information is None else dist_information.local_rank + ) + self.set_device() + # Now we can change to deepseed + if use_deepspeed: + self.distributed_type = DistributedType.DEEPSPEED + + # Set CPU affinity if enabled + if parse_flag_from_env("ACCELERATE_CPU_AFFINITY", False): + set_numa_affinity(self.local_process_index) + + # Check for old RTX 4000's that can't use P2P or IB and are on old drivers + if self.device.type == "cuda" and not check_cuda_p2p_ib_support(): + if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ: + raise NotImplementedError( + "Using RTX 4000 series doesn't support faster communication broadband via P2P or IB. 
" + 'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1" or use `accelerate launch` which ' + "will do this automatically." + ) + # Important: This should be the *only* code outside of `self.initialized!` + self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0) + + def __repr__(self) -> str: + return ( + f"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\n" + f"Num processes: {self.num_processes}\n" + f"Process index: {self.process_index}\n" + f"Local process index: {self.local_process_index}\n" + f"Device: {self.device}\n" + ) + + @staticmethod + def _reset_state(): + "Resets `_shared_state`, is used internally and should not be called" + PartialState._shared_state.clear() + + @property + def initialized(self) -> bool: + "Returns whether the `PartialState` has been initialized" + return self._shared_state != {} + + @property + def use_distributed(self): + """ + Whether the Accelerator is configured for distributed training + """ + return self.distributed_type != DistributedType.NO and self.num_processes > 1 + + @property + def is_last_process(self) -> bool: + "Returns whether the current process is the last one" + return self.process_index == self.num_processes - 1 + + @property + def is_main_process(self) -> bool: + "Returns whether the current process is the main process" + return ( + self.process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process + ) + + @property + def is_local_main_process(self) -> bool: + "Returns whether the current process is the main process on the local node" + return ( + self.local_process_index == 0 + if self.distributed_type != DistributedType.MEGATRON_LM + else self.is_last_process + ) + + def wait_for_everyone(self): + """ + Will stop the execution of the current process until every other process has reached that point (so this does + nothing when the script is only run in one process). Useful to do before saving a model. 
+ + Example: + + ```python + >>> # Assuming two GPU processes + >>> import time + >>> from accelerate.state import PartialState + + >>> state = PartialState() + >>> if state.is_main_process: + ... time.sleep(2) + >>> else: + ... print("I'm waiting for the main process to finish its sleep...") + >>> state.wait_for_everyone() + >>> # Should print on every process at the same time + >>> print("Everyone is here") + ``` + """ + if self.distributed_type in ( + DistributedType.MULTI_GPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_NPU, + DistributedType.MULTI_XPU, + DistributedType.MULTI_CPU, + DistributedType.DEEPSPEED, + DistributedType.FSDP, + ): + torch.distributed.barrier() + elif self.distributed_type == DistributedType.XLA: + xm.rendezvous("accelerate.utils.wait_for_everyone") + + def _goes_first(self, is_main: bool): + if not is_main: + self.wait_for_everyone() + + yield + + if is_main: + self.wait_for_everyone() + + @contextmanager + def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False): + """ + Splits `input` between `self.num_processes` quickly and can be then used on that process. Useful when doing + distributed inference, such as with different prompts. + + Note that when using a `dict`, all keys need to have the same number of elements. + + Args: + inputs (`list`, `tuple`, `torch.Tensor`, `dict` of `list`/`tuple`/`torch.Tensor`, or `datasets.Dataset`): + The input to split between processes. + apply_padding (`bool`, `optional`, defaults to `False`): + Whether to apply padding by repeating the last element of the input so that all processes have the same + number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing + in less inputs than there are processes. If so, just remember to drop the padded elements afterwards. 
+ + + Example: + + ```python + # Assume there are two processes + from accelerate import PartialState + + state = PartialState() + with state.split_between_processes(["A", "B", "C"]) as inputs: + print(inputs) + # Process 0 + ["A", "B"] + # Process 1 + ["C"] + + with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs: + print(inputs) + # Process 0 + ["A", "B"] + # Process 1 + ["C", "C"] + ``` + """ + if self.num_processes == 1: + yield inputs + return + length = len(inputs) + # Nested dictionary of any types + if isinstance(inputs, dict): + length = len(inputs[list(inputs.keys())[0]]) + if not all(len(v) == length for v in inputs.values()): + raise ValueError("All values in the dictionary must have the same length") + num_samples_per_process = math.ceil(length / self.num_processes) + start_index = self.process_index * num_samples_per_process + end_index = start_index + num_samples_per_process + if (len(inputs) % self.num_processes != 0) and (self.process_index == self.num_processes - 1): + end_index = length + + def _split_values(inputs, start_index, end_index): + if isinstance(inputs, (list, tuple, torch.Tensor)): + if start_index >= len(inputs): + result = inputs[-1:] + else: + result = inputs[start_index:end_index] + if apply_padding: + if isinstance(result, torch.Tensor): + from accelerate.utils import pad_across_processes, send_to_device + + # The tensor needs to be on the device before we can pad it + tensorized_result = send_to_device(result, self.device) + result = pad_across_processes(tensorized_result, pad_index=inputs[-1]) + else: + result += [result[-1]] * (num_samples_per_process - len(result)) + return result + elif isinstance(inputs, dict): + for key in inputs.keys(): + inputs[key] = _split_values(inputs[key], start_index, end_index) + return inputs + else: + if is_datasets_available(): + from datasets import Dataset + + if isinstance(inputs, Dataset): + if start_index >= len(inputs): + start_index = len(inputs) - 1 + if 
end_index > len(inputs): + end_index = len(inputs) + result_idcs = list(range(start_index, end_index)) + if apply_padding: + result_idcs += [end_index - 1] * (num_samples_per_process - len(result_idcs)) + return inputs.select(result_idcs) + return inputs + + yield _split_values(inputs, start_index, end_index) + + @contextmanager + def main_process_first(self): + """ + Lets the main process go first inside a with block. + + The other processes will enter the with block after the main process exits. + + Example: + + ```python + >>> from accelerate import Accelerator + + >>> accelerator = Accelerator() + >>> with accelerator.main_process_first(): + ... # This will be printed first by process 0 then in a seemingly + ... # random order by the other processes. + ... print(f"This will be printed by process {accelerator.process_index}") + ``` + """ + yield from self._goes_first(self.is_main_process) + + @contextmanager + def local_main_process_first(self): + """ + Lets the local main process go inside a with block. + + The other processes will enter the with block after the main process exits. + + Example: + + ```python + >>> from accelerate.state import PartialState + + >>> state = PartialState() + >>> with state.local_main_process_first(): + ... # This will be printed first by local process 0 then in a seemingly + ... # random order by the other processes. + ... print(f"This will be printed by process {state.local_process_index}") + ``` + """ + yield from self._goes_first(self.is_local_main_process) + + def on_main_process(self, function: Callable[..., Any] = None): + """ + Decorator that only runs the decorated function on the main process. + + Args: + function (`Callable`): The function to decorate. + + Example: + + ```python + >>> from accelerate.state import PartialState + + >>> state = PartialState() + + + >>> @state.on_main_process + ... def print_something(): + ... 
print("This will be printed by process 0 only.") + + + >>> print_something() + "This will be printed by process 0 only" + ``` + """ + if not self.initialized: + raise ValueError("The `PartialState` or `Accelerator` must be initialized before calling this function.") + if self.is_main_process or not self.use_distributed: + return function + return do_nothing + + def on_local_main_process(self, function: Callable[..., Any] = None): + """ + Decorator that only runs the decorated function on the local main process. + + Args: + function (`Callable`): The function to decorate. + + Example: + ```python + # Assume we have 2 servers with 4 processes each. + from accelerate.state import PartialState + + state = PartialState() + + + @state.on_local_main_process + def print_something(): + print("This will be printed by process 0 only on each server.") + + + print_something() + # On server 1: + "This will be printed by process 0 only" + # On server 2: + "This will be printed by process 0 only" + ``` + """ + if self.is_local_main_process or not self.use_distributed: + return function + return do_nothing + + def on_last_process(self, function: Callable[..., Any]): + """ + Decorator that only runs the decorated function on the last process. + + Args: + function (`Callable`): The function to decorate. + + Example: + ```python + # Assume we have 4 processes. + from accelerate.state import PartialState + + state = PartialState() + + + @state.on_last_process + def print_something(): + print(f"Printed on process {state.process_index}") + + + print_something() + "Printed on process 3" + ``` + """ + if self.is_last_process or not self.use_distributed: + return function + return do_nothing + + def on_process(self, function: Callable[..., Any] = None, process_index: int = None): + """ + Decorator that only runs the decorated function on the process with the given index. + + Args: + function (`Callable`, `optional`): + The function to decorate. 
+ process_index (`int`, `optional`): + The index of the process on which to run the function. + + Example: + ```python + # Assume we have 4 processes. + from accelerate.state import PartialState + + state = PartialState() + + + @state.on_process(process_index=2) + def print_something(): + print(f"Printed on process {state.process_index}") + + + print_something() + "Printed on process 2" + ``` + """ + if function is None: + return partial(self.on_process, process_index=process_index) + if (self.process_index == process_index) or (not self.use_distributed): + return function + return do_nothing + + def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None): + """ + Decorator that only runs the decorated function on the process with the given index on the current node. + + Args: + function (`Callable`, *optional*): + The function to decorate. + local_process_index (`int`, *optional*): + The index of the local process on which to run the function. + + Example: + ```python + # Assume we have 2 servers with 4 processes each. + from accelerate import Accelerator + + accelerator = Accelerator() + + + @accelerator.on_local_process(local_process_index=2) + def print_something(): + print(f"Printed on process {accelerator.local_process_index}") + + + print_something() + # On server 1: + "Printed on process 2" + # On server 2: + "Printed on process 2" + ``` + """ + if function is None: + return partial(self.on_local_process, local_process_index=local_process_index) + if (self.local_process_index == local_process_index) or (not self.use_distributed): + return function + return do_nothing + + def print(self, *args, **kwargs): + if self.is_local_main_process: + print(*args, **kwargs) + + @property + def default_device(self) -> torch.device: + """ + Returns the default device which is: + - MPS if `torch.backends.mps.is_available()` and `torch.backends.mps.is_built()` both return True. 
+ - CUDA if `torch.cuda.is_available()` + - MLU if `is_mlu_available()` + - NPU if `is_npu_available()` + - CPU otherwise + """ + if is_mps_available(): + os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" + return torch.device("mps") + elif is_mlu_available(): + return torch.device("mlu") + elif torch.cuda.is_available(): + return torch.device("cuda") + elif is_xpu_available(): + return torch.device("xpu:0") + elif is_npu_available(): + return torch.device("npu") + else: + return torch.device("cpu") + + def _prepare_backend( + self, cpu: bool = False, sagemaker_dp=False, backend: str = None + ) -> tuple[str, DistributedType]: + "Prepares any imports needed before initializing the distributed backend and sets `self.backend` properly" + distributed_type = None + if sagemaker_dp: + import smdistributed.dataparallel.torch.torch_smddp # noqa + + backend = "smddp" + distributed_type = DistributedType.MULTI_GPU + elif is_torch_xla_available(): + backend = "xla" + distributed_type = DistributedType.XLA + elif int(os.environ.get("LOCAL_RANK", -1)) != -1: + if not cpu: + if is_mlu_available(): + backend = "cncl" + distributed_type = DistributedType.MULTI_MLU + elif torch.cuda.is_available(): + if backend is None: + backend = "nccl" + distributed_type = DistributedType.MULTI_GPU + elif is_npu_available(): + backend = "hccl" + distributed_type = DistributedType.MULTI_NPU + if backend is None and ( + int(os.environ.get("LOCAL_RANK", -1)) != -1 + or get_int_from_env(["PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE", "WORLD_SIZE"], 1) > 1 + ): + if not cpu and is_xpu_available(): + distributed_type = DistributedType.MULTI_XPU + else: + distributed_type = DistributedType.MULTI_CPU + if is_ccl_available() and ( + get_int_from_env(["CCL_WORKER_COUNT"], 0) > 0 or distributed_type == DistributedType.MULTI_XPU + ): + if get_ccl_version() >= "1.12": + import oneccl_bindings_for_pytorch # noqa: F401 + else: + import torch_ccl # noqa: F401 + + backend = "ccl" + elif 
torch.distributed.is_mpi_available(): + backend = "mpi" + else: + backend = "gloo" + if distributed_type is None: + distributed_type = DistributedType.NO + return backend, distributed_type + + def set_device(self): + """ + Sets the device in `self.device` to the current distributed environment. + """ + if self.device is not None: + return + if self.distributed_type == DistributedType.NO: + self.device = torch.device("cpu") if self._cpu else self.default_device + return + device = str(self.distributed_type).split(".")[-1].replace("MULTI_", "").lower() + if device not in ("cpu", "gpu", "mlu", "npu", "xpu", "xla"): + raise ValueError( + f"Can't set device for {self.distributed_type} ({device}), verify we should be calling `_set_device()` for it!" + ) + if device == "xla": + self.device = xm.xla_device() + else: + if device == "gpu": + device = "cuda" + self.device = torch.device(device, self.local_process_index) + if self.device is not None: + if device == "xpu": + torch.xpu.set_device(self.device) + elif device == "mlu": + torch.mlu.set_device(self.device) + elif device == "npu": + torch.npu.set_device(self.device) + elif device == "cuda": + torch.cuda.set_device(self.device) + + def __getattr__(self, name: str): + # By this point we know that no attributes of `self` contain `name`, + # so we just modify the error message + if name in self._known_attrs: + raise AttributeError( + f"`PartialState` object has no attribute `{name}`. " + "This happens if `PartialState._reset_state()` was called and " + "an `Accelerator` or `PartialState` was not reinitialized." + ) + # Raise a typical AttributeError + raise AttributeError(f"'PartialState' object has no attribute '{name}'") + + +class AcceleratorState: + """ + Singleton class that has information about the current training environment. + + **Available attributes:** + + - **device** (`torch.device`) -- The device to use. 
+ - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently + in use. + - **initialized** (`bool`) -- Whether or not the `AcceleratorState` has been initialized from `Accelerator`. + - **local_process_index** (`int`) -- The index of the current process on the current server. + - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type + of mixed precision being performed. (Choose from 'no','fp16','bf16 or 'fp8'). + - **num_processes** (`int`) -- The number of processes currently launched in parallel. + - **process_index** (`int`) -- The index of the current process. + - **is_last_process** (`bool`) -- Whether or not the current process is the last one. + - **is_main_process** (`bool`) -- Whether or not the current process is the main one. + - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node. + - **debug** (`bool`) -- Whether or not the current script is being run in debug mode. 
+ """ + + _shared_state = SharedDict() + _known_attrs = PartialState._known_attrs + [ + "deepspeed_plugin", + "use_ipex", + "fsdp_plugin", + "megatron_lm_plugin", + "dynamo_plugin", + ] + + def __init__( + self, + mixed_precision: str = None, + cpu: bool = False, + dynamo_plugin=None, + deepspeed_plugin=None, + fsdp_plugin=None, + megatron_lm_plugin=None, + _from_accelerator: bool = False, + **kwargs, + ): + self.__dict__ = self._shared_state + if parse_flag_from_env("ACCELERATE_USE_CPU"): + cpu = True + if PartialState._shared_state == {}: + PartialState(cpu, **kwargs) + self.__dict__.update(PartialState._shared_state) + self._check_initialized(mixed_precision, cpu) + if not self.initialized: + self.deepspeed_plugin = None + self.use_ipex = None + mixed_precision = ( + parse_choice_from_env("ACCELERATE_MIXED_PRECISION", "no") + if mixed_precision is None + else mixed_precision.lower() + ) + if mixed_precision == "fp8": + if not is_fp8_available(): + raise ValueError( + "Using `fp8` precision requires `transformer_engine` or `MS-AMP` to be installed." + ) + elif not check_fp8_capability(): + logger.warning( + f"The current device has compute capability of {torch.cuda.get_device_capability()} which is " + "insufficient for FP8 mixed precision training (requires a GPU Hopper/Ada Lovelace " + "or higher, compute capability of 8.9 or higher). Will use FP16 instead." + ) + mixed_precision = "fp16" + + self.dynamo_plugin = dynamo_plugin + if not _from_accelerator: + raise ValueError( + "Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` " + "before using any functionality from the `accelerate` library." 
+ ) + # deepspeed handles mixed_precision using deepspeed_config + self._mixed_precision = "no" if self.distributed_type == DistributedType.DEEPSPEED else mixed_precision + if self.distributed_type == DistributedType.XLA and is_torch_xla_available(check_is_tpu=True): + if mixed_precision == "bf16": + if os.environ.get("ACCELERATE_DOWNCAST_BF16"): + os.environ["XLA_USE_BF16"] = str(0) + os.environ["XLA_DOWNCAST_BF16"] = str(1) + self.downcast_bfloat = True + else: + os.environ["XLA_USE_BF16"] = str(1) + os.environ["XLA_DOWNCAST_BF16"] = str(0) + self.downcast_bfloat = False + elif os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and not cpu: + self.deepspeed_plugin = deepspeed_plugin + elif self.distributed_type in [ + DistributedType.MULTI_GPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_NPU, + DistributedType.MULTI_XPU, + ]: + if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true": + self.distributed_type = DistributedType.FSDP + if self._mixed_precision != "no": + fsdp_plugin.set_mixed_precision(self._mixed_precision) + self.fsdp_plugin = fsdp_plugin + if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true" and self.distributed_type not in [ + DistributedType.MULTI_NPU, + DistributedType.MULTI_XPU, + ]: + self.distributed_type = DistributedType.MEGATRON_LM + megatron_lm_plugin.set_mixed_precision(self._mixed_precision) + self.megatron_lm_plugin = megatron_lm_plugin + elif self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]: + if is_ipex_available(): + # check if user disables it explicitly + self.use_ipex = parse_flag_from_env("ACCELERATE_USE_IPEX", default=True) + else: + self.use_ipex = False + if ( + self.dynamo_plugin.backend != DynamoBackend.NO + and self._mixed_precision == "no" + and self.device.type == "cuda" + ): + torch.backends.cuda.matmul.allow_tf32 = True + PartialState._shared_state["distributed_type"] = self.distributed_type + + @property + def initialized(self) 
-> bool: + return self._shared_state != PartialState._shared_state + + def __repr__(self): + repr = PartialState().__repr__() + f"\nMixed precision type: {self.mixed_precision}\n" + if self.distributed_type == DistributedType.DEEPSPEED: + repr += f"ds_config: {self.deepspeed_plugin.deepspeed_config}\n" + return repr + + def _check_initialized(self, mixed_precision=None, cpu=None): + "Checks if a modification is trying to be made and the `AcceleratorState` has already been initialized" + if self.initialized: + err = "AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `{flag}` to `Accelerator()`." + if cpu and self.device.type != "cpu": + raise ValueError(err.format(flag="cpu=True")) + if ( + mixed_precision is not None + and mixed_precision != self._mixed_precision + and self.distributed_type != DistributedType.DEEPSPEED + ): + raise ValueError(err.format(flag=f"mixed_precision='{mixed_precision}'")) + + # For backward compatibility + @property + def use_fp16(self): + warnings.warn( + "The `use_fp16` property is deprecated and will be removed in version 1.0 of Accelerate use " + "`AcceleratorState.mixed_precision == 'fp16'` instead.", + FutureWarning, + ) + return self._mixed_precision != "no" + + @property + def mixed_precision(self): + if self.distributed_type == DistributedType.DEEPSPEED: + config = self.deepspeed_plugin.deepspeed_config + if config.get("fp16", {}).get("enabled", False): + mixed_precision = "fp16" + elif config.get("bf16", {}).get("enabled", False): + mixed_precision = "bf16" + else: + mixed_precision = "no" + else: + mixed_precision = self._mixed_precision + return mixed_precision + + @staticmethod + def _reset_state(reset_partial_state: bool = False): + "Resets `_shared_state`, is used internally and should not be called" + AcceleratorState._shared_state.clear() + if reset_partial_state: + PartialState._reset_state() + + @property + def use_distributed(self): + """ + Whether the 
Accelerator is configured for distributed training + """ + return PartialState().use_distributed + + @property + def is_last_process(self) -> bool: + "Returns whether the current process is the last one" + return PartialState().is_last_process + + @property + def is_main_process(self) -> bool: + "Returns whether the current process is the main process" + return PartialState().is_main_process + + @property + def is_local_main_process(self) -> bool: + "Returns whether the current process is the main process on the local node" + return PartialState().is_local_main_process + + def wait_for_everyone(self): + PartialState().wait_for_everyone() + + @contextmanager + def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False): + """ + Splits `input` between `self.num_processes` quickly and can be then used on that process. Useful when doing + distributed inference, such as with different prompts. + + Note that when using a `dict`, all keys need to have the same number of elements. + + Args: + inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`): + The input to split between processes. + apply_padding (`bool`, `optional`, defaults to `False`): + Whether to apply padding by repeating the last element of the input so that all processes have the same + number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing + in less inputs than there are processes. If so, just remember to drop the padded elements afterwards. 
+ + + Example: + + ```python + # Assume there are two processes + from accelerate.state import AcceleratorState + + state = AcceleratorState() + with state.split_between_processes(["A", "B", "C"]) as inputs: + print(inputs) + # Process 0 + ["A", "B"] + # Process 1 + ["C"] + + with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs: + print(inputs) + # Process 0 + ["A", "B"] + # Process 1 + ["C", "C"] + ``` + """ + with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs: + yield inputs + + @contextmanager + def main_process_first(self): + """ + Lets the main process go first inside a with block. + + The other processes will enter the with block after the main process exits. + """ + with PartialState().main_process_first(): + yield + + @contextmanager + def local_main_process_first(self): + """ + Lets the local main process go inside a with block. + + The other processes will enter the with block after the main process exits. + """ + with PartialState().local_main_process_first(): + yield + + def print(self, *args, **kwargs): + PartialState().print(*args, **kwargs) + + def __getattr__(self, name: str): + # By this point we know that no attributes of `self` contain `name`, + # so we just modify the error message + if name in self._known_attrs: + raise AttributeError( + f"`AcceleratorState` object has no attribute `{name}`. " + "This happens if `AcceleratorState._reset_state()` was called and " + "an `Accelerator` or `PartialState` was not reinitialized." 
+ ) + # Raise a typical AttributeError + raise AttributeError(f"'AcceleratorState' object has no attribute '{name}'") + + +class GradientState: + """ + Singleton class that has information related to gradient synchronization for gradient accumulation + + **Available attributes:** + + - **end_of_dataloader** (`bool`) -- Whether we have reached the end the current dataloader + - **remainder** (`int`) -- The number of extra samples that were added from padding the dataloader + - **sync_gradients** (`bool`) -- Whether the gradients should be synced across all devices + - **active_dataloader** (`Optional[DataLoader]`) -- The dataloader that is currently being iterated over + - **dataloader_references** (`List[Optional[DataLoader]]`) -- A list of references to the dataloaders that are + being iterated over + - **num_steps** (`int`) -- The number of steps to accumulate over + - **adjust_scheduler** (`bool`) -- Whether the scheduler should be adjusted to account for the gradient + accumulation + - **sync_with_dataloader** (`bool`) -- Whether the gradients should be synced at the end of the dataloader + iteration and the number of total steps reset + - **is_xla_gradients_synced** (`bool`) -- Whether the XLA gradients have been synchronized. It is initialized + as false. Once gradients have been reduced before the optimizer step, this flag is set to true. Subsequently, + after each step, the flag is reset to false. FSDP will always synchronize the gradients, hence + is_xla_gradients_synced is always true. 
+ """ + + _shared_state = SharedDict() + + def __init__(self, gradient_accumulation_plugin: Optional[GradientAccumulationPlugin] = None): + self.__dict__ = self._shared_state + if not self.initialized: + self.sync_gradients = True + self.active_dataloader = None + self.dataloader_references = [None] + self.plugin_kwargs = ( + gradient_accumulation_plugin.to_kwargs() if gradient_accumulation_plugin is not None else {} + ) + self._is_xla_gradients_synced = False + + # Plugin args are different and can be updated + if gradient_accumulation_plugin is not None and self.plugin_kwargs != gradient_accumulation_plugin.to_kwargs(): + self.plugin_kwargs = gradient_accumulation_plugin.to_kwargs() + + @property + def num_steps(self) -> int: + "Returns the number of steps to accumulate over" + return self.plugin_kwargs.get("num_steps", 1) + + @property + def adjust_scheduler(self) -> bool: + "Returns whether the scheduler should be adjusted" + return self.plugin_kwargs.get("adjust_scheduler", False) + + @property + def sync_with_dataloader(self) -> bool: + "Returns whether the gradients should be synced at the end of the dataloader iteration and the number of total steps reset" + return self.plugin_kwargs.get("sync_with_dataloader", True) + + @property + def initialized(self) -> bool: + "Returns whether the `GradientState` has been initialized" + return GradientState._shared_state != {} + + @property + def end_of_dataloader(self) -> bool: + "Returns whether we have reached the end of the current dataloader" + if not self.in_dataloader: + return False + return self.active_dataloader.end_of_dataloader + + @property + def remainder(self) -> int: + "Returns the number of extra samples that were added from padding the dataloader" + if not self.in_dataloader: + return -1 + return self.active_dataloader.remainder + + def __repr__(self): + return ( + f"Sync Gradients: {self.sync_gradients}\n" + f"At end of current dataloader: {self.end_of_dataloader}\n" + f"Extra samples added: 
{self.remainder}\n" + f"Gradient accumulation plugin: {self.plugin_kwargs}\n" + ) + + @property + def is_xla_gradients_synced(self): + "Returns the value of is_xla_gradients_synced. FSDP will always synchronize the gradients, hence is_xla_gradients_synced is always true." + if parse_flag_from_env("ACCELERATE_USE_FSDP", default=False): + return True + return self._is_xla_gradients_synced + + @is_xla_gradients_synced.setter + def is_xla_gradients_synced(self, is_synced): + "Set the _is_xla_gradients_synced attribute." + self._is_xla_gradients_synced = is_synced + + def _set_sync_gradients(self, sync_gradients): + "Private function that sets whether gradients should be synchronized. Users should not have to call this." + self.sync_gradients = sync_gradients + # Allow grad-sync to automatically work on TPUs + if ( + self.sync_gradients + and is_torch_xla_available(check_is_tpu=True) + and PartialState().distributed_type == DistributedType.XLA + ): + xm.mark_step() + + def _add_dataloader(self, dataloader): + "Private function that adds a dataloader to `self.dataloader_references` and sets `in_dataloader` to `True`. Users should not have to call this." + self.active_dataloader = dataloader + self.dataloader_references.append(self.active_dataloader) + + def _remove_dataloader(self, dataloader): + "Private function that removes a dataloader from `self.dataloader_references` and sets `in_dataloader` to `False` if there are no more dataloaders. Users should not have to call this." 
+ self.dataloader_references.remove(dataloader) + self.active_dataloader = self.dataloader_references[-1] + + @property + def in_dataloader(self) -> bool: + "Returns whether the current process is in a dataloader" + return self.active_dataloader is not None + + @staticmethod + def _reset_state(): + "Resets `_shared_state`, is used internally and should not be called" + GradientState._shared_state.clear() diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/__init__.py b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bd5f6f15da98737459dcf7ab05b68e87be9a384d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/__init__.py @@ -0,0 +1,50 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from .testing import ( + DEFAULT_LAUNCH_COMMAND, + are_the_same_tensors, + assert_exception, + device_count, + execute_subprocess_async, + get_launch_command, + memory_allocated_func, + path_in_accelerate_package, + require_bnb, + require_cpu, + require_cuda, + require_huggingface_suite, + require_mlu, + require_mps, + require_multi_device, + require_multi_gpu, + require_multi_xpu, + require_non_cpu, + require_non_torch_xla, + require_non_xpu, + require_npu, + require_pippy, + require_single_device, + require_single_gpu, + require_single_xpu, + require_torch_min_version, + require_tpu, + require_xpu, + skip, + slow, + torch_device, +) +from .training import RegressionDataset, RegressionModel, RegressionModel4XPU + + +from .scripts import test_script, test_sync, test_ops # isort: skip diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5091932acbc7c5553e35418b5e047aa4dc75994 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/examples.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/examples.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2592c14bd8120f37821988f80a9492153e31961a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/examples.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/testing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/testing.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..2103a2f5ce1b34f4749ee3e403cd4bb4a182234b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/testing.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/training.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/training.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97e41f7e7e4932efe9bd73b8b75490b1023415f9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/training.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/examples.py b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/examples.py new file mode 100644 index 0000000000000000000000000000000000000000..ed41d38c9092385ba9730472aa10b5208f48c67b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/examples.py @@ -0,0 +1,146 @@ +#!/usr/bin/env python + +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +A collection of utilities for comparing `examples/complete_*_example.py` scripts with the capabilities inside of each +`examples/by_feature` example. 
`compare_against_test` is the main function that should be used when testing, while the +others are used to either get the code that matters, or to preprocess them (such as stripping comments) +""" + +import os +from typing import List + + +def get_function_contents_by_name(lines: List[str], name: str): + """ + Extracts a function from `lines` of segmented source code with the name `name`. + + Args: + lines (`List[str]`): + Source code of a script seperated by line. + name (`str`): + The name of the function to extract. Should be either `training_function` or `main` + """ + if name != "training_function" and name != "main": + raise ValueError(f"Incorrect function name passed: {name}, choose either 'main' or 'training_function'") + good_lines, found_start = [], False + for line in lines: + if not found_start and f"def {name}" in line: + found_start = True + good_lines.append(line) + continue + if found_start: + if name == "training_function" and "def main" in line: + return good_lines + if name == "main" and "if __name__" in line: + return good_lines + good_lines.append(line) + + +def clean_lines(lines: List[str]): + """ + Filters `lines` and removes any entries that start with a comment ('#') or is just a newline ('\n') + + Args: + lines (`List[str]`): + Source code of a script seperated by line. + """ + return [line for line in lines if not line.lstrip().startswith("#") and line != "\n"] + + +def compare_against_test(base_filename: str, feature_filename: str, parser_only: bool, secondary_filename: str = None): + """ + Tests whether the additional code inside of `feature_filename` was implemented in `base_filename`. This should be + used when testing to see if `complete_*_.py` examples have all of the implementations from each of the + `examples/by_feature/*` scripts. + + It utilizes `nlp_example.py` to extract out all of the repeated training code, so that only the new additional code + is examined and checked. 
If something *other* than `nlp_example.py` should be used, such as `cv_example.py` for the + `complete_cv_example.py` script, it should be passed in for the `secondary_filename` parameter. + + Args: + base_filename (`str` or `os.PathLike`): + The filepath of a single "complete" example script to test, such as `examples/complete_cv_example.py` + feature_filename (`str` or `os.PathLike`): + The filepath of a single feature example script. The contents of this script are checked to see if they + exist in `base_filename` + parser_only (`bool`): + Whether to compare only the `main()` sections in both files, or to compare the contents of + `training_loop()` + secondary_filename (`str`, *optional*): + A potential secondary filepath that should be included in the check. This function extracts the base + functionalities off of "examples/nlp_example.py", so if `base_filename` is a script other than + `complete_nlp_example.py`, the template script should be included here. Such as `examples/cv_example.py` + """ + with open(base_filename) as f: + base_file_contents = f.readlines() + with open(os.path.abspath(os.path.join("examples", "nlp_example.py"))) as f: + full_file_contents = f.readlines() + with open(feature_filename) as f: + feature_file_contents = f.readlines() + if secondary_filename is not None: + with open(secondary_filename) as f: + secondary_file_contents = f.readlines() + + # This is our base, we remove all the code from here in our `full_filename` and `feature_filename` to find the new content + if parser_only: + base_file_func = clean_lines(get_function_contents_by_name(base_file_contents, "main")) + full_file_func = clean_lines(get_function_contents_by_name(full_file_contents, "main")) + feature_file_func = clean_lines(get_function_contents_by_name(feature_file_contents, "main")) + if secondary_filename is not None: + secondary_file_func = clean_lines(get_function_contents_by_name(secondary_file_contents, "main")) + else: + base_file_func = 
clean_lines(get_function_contents_by_name(base_file_contents, "training_function")) + full_file_func = clean_lines(get_function_contents_by_name(full_file_contents, "training_function")) + feature_file_func = clean_lines(get_function_contents_by_name(feature_file_contents, "training_function")) + if secondary_filename is not None: + secondary_file_func = clean_lines( + get_function_contents_by_name(secondary_file_contents, "training_function") + ) + + _dl_line = "train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\n" + + # Specific code in our script that differs from the full version, aka what is new + new_feature_code = [] + passed_idxs = [] # We keep track of the idxs just in case it's a repeated statement + it = iter(feature_file_func) + for i in range(len(feature_file_func) - 1): + if i not in passed_idxs: + line = next(it) + if (line not in full_file_func) and (line.lstrip() != _dl_line): + if "TESTING_MOCKED_DATALOADERS" not in line: + new_feature_code.append(line) + passed_idxs.append(i) + else: + # Skip over the `config['num_epochs'] = 2` statement + _ = next(it) + + # Extract out just the new parts from the full_file_training_func + new_full_example_parts = [] + passed_idxs = [] # We keep track of the idxs just in case it's a repeated statement + for i, line in enumerate(base_file_func): + if i not in passed_idxs: + if (line not in full_file_func) and (line.lstrip() != _dl_line): + if "TESTING_MOCKED_DATALOADERS" not in line: + new_full_example_parts.append(line) + passed_idxs.append(i) + + # Finally, get the overall diff + diff_from_example = [line for line in new_feature_code if line not in new_full_example_parts] + if secondary_filename is not None: + diff_from_two = [line for line in full_file_contents if line not in secondary_file_func] + diff_from_example = [line for line in diff_from_example if line not in diff_from_two] + + return diff_from_example diff --git 
a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_cli.py b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_cli.py new file mode 100644 index 0000000000000000000000000000000000000000..c85828cd49624372ae1866082e5580c60f8c9293 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_cli.py @@ -0,0 +1,26 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import torch + + +def main(): + if torch.cuda.is_available(): + num_gpus = torch.cuda.device_count() + else: + num_gpus = 0 + print(f"Successfully ran on {num_gpus} GPUs") + + +if __name__ == "__main__": + main() diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_distributed_data_loop.py b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_distributed_data_loop.py new file mode 100644 index 0000000000000000000000000000000000000000..17d577c58ac2e0bea6e63c54b464eef483de12a8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_distributed_data_loop.py @@ -0,0 +1,238 @@ +#!/usr/bin/env python + +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import warnings +from typing import List +from unittest.mock import Mock + +import torch +from torch.utils.data import DataLoader, IterableDataset, TensorDataset + +from accelerate.accelerator import Accelerator, DataLoaderConfiguration +from accelerate.utils.dataclasses import DistributedType + + +class DummyIterableDataset(IterableDataset): + def __init__(self, data): + self.data = data + + def __iter__(self): + yield from self.data + + +def create_accelerator(even_batches=True): + dataloader_config = DataLoaderConfiguration(even_batches=even_batches) + accelerator = Accelerator(dataloader_config=dataloader_config) + assert accelerator.num_processes == 2, "this script expects that two GPUs are available" + return accelerator + + +def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False): + """ + Create a simple DataLoader to use during the test cases + """ + if iterable: + dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size))) + else: + dataset = TensorDataset(torch.as_tensor(range(dataset_size))) + + dl = DataLoader(dataset, batch_size=batch_size) + dl = accelerator.prepare(dl) + + return dl + + +def verify_dataloader_batch_sizes( + accelerator: Accelerator, + dataset_size: int, + batch_size: int, + process_0_expected_batch_sizes: List[int], + process_1_expected_batch_sizes: List[int], +): + """ + A helper function for verifying the batch sizes coming from a prepared dataloader in each process + """ + dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, 
batch_size=batch_size) + + batch_sizes = [len(batch[0]) for batch in dl] + + if accelerator.process_index == 0: + assert batch_sizes == process_0_expected_batch_sizes + elif accelerator.process_index == 1: + assert batch_sizes == process_1_expected_batch_sizes + + +def test_default_ensures_even_batch_sizes(): + accelerator = create_accelerator() + + # without padding, we would expect a different number of batches + verify_dataloader_batch_sizes( + accelerator, + dataset_size=3, + batch_size=1, + process_0_expected_batch_sizes=[1, 1], + process_1_expected_batch_sizes=[1, 1], + ) + + # without padding, we would expect the same number of batches, but different sizes + verify_dataloader_batch_sizes( + accelerator, + dataset_size=7, + batch_size=2, + process_0_expected_batch_sizes=[2, 2], + process_1_expected_batch_sizes=[2, 2], + ) + + +def test_can_disable_even_batches(): + accelerator = create_accelerator(even_batches=False) + + verify_dataloader_batch_sizes( + accelerator, + dataset_size=3, + batch_size=1, + process_0_expected_batch_sizes=[1, 1], + process_1_expected_batch_sizes=[1], + ) + + verify_dataloader_batch_sizes( + accelerator, + dataset_size=7, + batch_size=2, + process_0_expected_batch_sizes=[2, 2], + process_1_expected_batch_sizes=[2, 1], + ) + + +def test_can_join_uneven_inputs(): + accelerator = create_accelerator(even_batches=False) + + model = torch.nn.Linear(1, 1) + ddp_model = accelerator.prepare(model) + + dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) + + batch_idxs = [] + with accelerator.join_uneven_inputs([ddp_model]): + for batch_idx, batch in enumerate(dl): + output = ddp_model(batch[0].float()) + loss = output.sum() + loss.backward() + batch_idxs.append(batch_idx) + + accelerator.wait_for_everyone() + + if accelerator.process_index == 0: + assert batch_idxs == [0, 1] + elif accelerator.process_index == 1: + assert batch_idxs == [0] + + +def test_join_raises_warning_for_non_ddp_distributed(accelerator): + with 
warnings.catch_warnings(record=True) as w: + with accelerator.join_uneven_inputs([Mock()]): + pass + + assert issubclass(w[-1].category, UserWarning) + assert "only supported for multi-GPU" in str(w[-1].message) + + +def test_join_can_override_even_batches(): + default_even_batches = True + overridden_even_batches = False + accelerator = create_accelerator(even_batches=default_even_batches) + model = torch.nn.Linear(1, 1) + ddp_model = accelerator.prepare(model) + train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) + valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) + + with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches): + train_dl_overridden_value = train_dl.batch_sampler.even_batches + valid_dl_overridden_value = valid_dl.batch_sampler.even_batches + + assert train_dl_overridden_value == overridden_even_batches + assert valid_dl_overridden_value == overridden_even_batches + assert train_dl.batch_sampler.even_batches == default_even_batches + assert valid_dl.batch_sampler.even_batches == default_even_batches + + +def test_join_can_override_for_mixed_type_dataloaders(): + default_even_batches = True + overridden_even_batches = False + accelerator = create_accelerator(even_batches=default_even_batches) + model = torch.nn.Linear(1, 1) + ddp_model = accelerator.prepare(model) + create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True) + batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + try: + with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches): + batch_dl_overridden_value = batch_dl.batch_sampler.even_batches + except AttributeError: + # ensure attribute error is not raised when processing iterable dl + raise AssertionError + + assert batch_dl_overridden_value == overridden_even_batches + assert batch_dl.batch_sampler.even_batches == 
default_even_batches + + +def test_join_raises_warning_for_iterable_when_overriding_even_batches(): + accelerator = create_accelerator() + model = torch.nn.Linear(1, 1) + ddp_model = accelerator.prepare(model) + create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True) + + with warnings.catch_warnings(record=True) as w: + with accelerator.join_uneven_inputs([ddp_model], even_batches=False): + pass + + assert issubclass(w[-1].category, UserWarning) + assert "only supported for map-style datasets" in str(w[-1].message) + + +def main(): + accelerator = create_accelerator() + + accelerator.print("Test that even_batches variable ensures uniform batches across processes") + test_default_ensures_even_batch_sizes() + + accelerator.print("Run tests with even_batches disabled") + test_can_disable_even_batches() + + accelerator.print("Test joining uneven inputs") + test_can_join_uneven_inputs() + + accelerator.print("Test overriding even_batches when joining uneven inputs") + test_join_can_override_even_batches() + + accelerator.print("Test overriding even_batches for mixed dataloader types") + test_join_can_override_for_mixed_type_dataloaders() + + accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders") + test_join_raises_warning_for_iterable_when_overriding_even_batches() + + accelerator.print("Test join with non DDP distributed raises warning") + original_state = accelerator.state.distributed_type + accelerator.state.distributed_type = DistributedType.FSDP + test_join_raises_warning_for_non_ddp_distributed(accelerator) + accelerator.state.distributed_type = original_state + + +if __name__ == "__main__": + main() diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_sync.py b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_sync.py new file mode 100644 index 0000000000000000000000000000000000000000..bd458bcab8aaa42409a7c1234a4afffb087e8a7c --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_sync.py @@ -0,0 +1,392 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from copy import deepcopy + +import torch +import torch.nn.functional as F +from torch.optim import AdamW +from torch.optim.lr_scheduler import LambdaLR +from torch.utils.data import DataLoader + +from accelerate.accelerator import Accelerator, GradientAccumulationPlugin +from accelerate.state import GradientState +from accelerate.test_utils import RegressionDataset, RegressionModel +from accelerate.utils import DistributedType, set_seed + + +def check_model_parameters(model_a, model_b, did_step, iteration, **kwargs): + for param, grad_param in zip(model_a.parameters(), model_b.parameters()): + if not param.requires_grad: + continue + if not did_step: + # Grads should not be in sync + assert ( + torch.allclose(param.grad, grad_param.grad, **kwargs) is False + ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})" + else: + # Grads should be in sync + assert ( + torch.allclose(param.grad, grad_param.grad, **kwargs) is True + ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})" + + +def step_model(model, input, target, accelerator, do_backward=True): + model.train() + output = model(input) + 
loss = F.mse_loss(output, target.to(output.device)) + if not do_backward: + loss /= accelerator.gradient_accumulation_steps + loss.backward() + else: + accelerator.backward(loss) + + +def get_training_setup(accelerator, sched=False): + "Returns everything needed to perform basic training" + set_seed(42) + model = RegressionModel() + ddp_model = deepcopy(model) + dset = RegressionDataset(length=80) + dataloader = DataLoader(dset, batch_size=16) + model.to(accelerator.device) + if sched: + opt = AdamW(params=model.parameters(), lr=1e-3) + ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3) + sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65) + ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65) + # Make a copy of `model` + if sched: + ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader) + else: + ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader) + if sched: + return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) + return model, ddp_model, dataloader + + +def test_noop_sync(accelerator): + # Test when on a single CPU or GPU that the context manager does nothing + model, ddp_model, dataloader = get_training_setup(accelerator) + # Use a single batch + ddp_input, ddp_target = next(iter(dataloader)).values() + for iteration in range(3): + # Gather the distributed inputs and targs for the base model + input, target = accelerator.gather((ddp_input, ddp_target)) + input, target = input.to(accelerator.device), target.to(accelerator.device) + # Perform our initial ground truth step in non "DDP" + step_model(model, input, target, accelerator) + # Do "gradient accumulation" (noop) + if iteration % 2 == 0: + # Accumulate grads locally + with accelerator.no_sync(ddp_model): + step_model(ddp_model, ddp_input, ddp_target, accelerator) + else: + # Sync grads + step_model(ddp_model, ddp_input, ddp_target, accelerator) + + # Since `no_sync` is a noop, `ddp_model` and 
`model` grads should always be in sync + check_model_parameters(model, ddp_model, True, iteration) + for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): + if not param.requires_grad: + continue + assert torch.allclose( + param.grad, ddp_param.grad + ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" + + # Shuffle ddp_input on each iteration + torch.manual_seed(1337 + iteration) + ddp_input = ddp_input[torch.randperm(len(ddp_input))] + + +def test_distributed_sync(accelerator): + # Test on distributed setup that context manager behaves properly + model, ddp_model, dataloader = get_training_setup(accelerator) + # Use a single batch + ddp_input, ddp_target = next(iter(dataloader)).values() + for iteration in range(3): + # Gather the distributed inputs and targs for the base model + input, target = accelerator.gather((ddp_input, ddp_target)) + input, target = input.to(accelerator.device), target.to(accelerator.device) + # Perform our initial ground truth step in non "DDP" + step_model(model, input, target, accelerator) + # Do "gradient accumulation" (noop) + if iteration % 2 == 0: + # Accumulate grads locally + with accelerator.no_sync(ddp_model): + step_model(ddp_model, ddp_input, ddp_target, accelerator) + else: + # Sync grads + step_model(ddp_model, ddp_input, ddp_target, accelerator) + + # DDP model and model should only be in sync when not (iteration % 2 == 0) + for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): + if not param.requires_grad: + continue + if iteration % 2 == 0: + # Grads should not be in sync + assert ( + torch.allclose(param.grad, ddp_param.grad) is False + ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" + else: + # Grads should be in sync + assert ( + torch.allclose(param.grad, ddp_param.grad) is True + ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad 
({ddp_param.grad})" + + # Shuffle ddp_input on each iteration + torch.manual_seed(1337 + iteration) + ddp_input = ddp_input[torch.randperm(len(ddp_input))] + + +def test_distributed_sync_multiple_fwd(accelerator): + # Test on distributed setup that context manager behaves properly when used with multiple forwards followed by multiple backwards + model, ddp_model, dataloader = get_training_setup(accelerator) + # Do multiple forwards + losses = [] + num_iterations = 3 + for iteration in range(num_iterations): + ddp_input, ddp_target = next(iter(dataloader)).values() + + # Gather the distributed inputs and targs for the base model + input, target = accelerator.gather((ddp_input, ddp_target)) + input, target = input.to(accelerator.device), target.to(accelerator.device) + + # Perform our initial ground truth step in non "DDP" + step_model(model, input, target, accelerator) + + # Accumulate grads locally + with accelerator.no_sync(ddp_model): + ddp_output = ddp_model(ddp_input) + loss = F.mse_loss(ddp_output, ddp_target.to(ddp_output.device)) + losses.append(loss) + + # Do multiple backwards and sync only at the last backward + for iteration in range(num_iterations): + loss = losses[iteration] + + if iteration < num_iterations - 1: + # Accumulate grads locally + accelerator.backward(loss) + + # DDP model and model should only be in sync after last backward + for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): + if not param.requires_grad: + continue + # Grads should not be in sync + assert ( + torch.allclose(param.grad, ddp_param.grad) is False + ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" + + else: + # Sync grads if last backward + with accelerator.trigger_sync_in_backward(ddp_model): + accelerator.backward(loss) + + # DDP model and model should only be in sync after last backward + for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): + if not param.requires_grad: + 
continue + # Grads should be in sync + assert ( + torch.allclose(param.grad, ddp_param.grad) is True + ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" + + +def test_gradient_accumulation(split_batches=False, dispatch_batches=False, sync_each_batch=False): + gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=2, sync_each_batch=sync_each_batch) + accelerator = Accelerator( + split_batches=split_batches, + dispatch_batches=dispatch_batches, + gradient_accumulation_plugin=gradient_accumulation_plugin, + ) + # Test that context manager behaves properly + model, ddp_model, dataloader = get_training_setup(accelerator) + for iteration, batch in enumerate(dataloader): + ddp_input, ddp_target = batch.values() + # Gather the distributed inputs and targs for the base model + input, target = accelerator.gather((ddp_input, ddp_target)) + input, target = input.to(accelerator.device), target.to(accelerator.device) + # Perform our initial ground truth step in non "DDP" + step_model(model, input, target, accelerator, False) + # Do "gradient accumulation" (noop) + with accelerator.accumulate(ddp_model): + step_model(ddp_model, ddp_input, ddp_target, accelerator) + + # DDP model and model should only be in sync when not (iteration % 2 == 0) + for param, ddp_param in zip(model.parameters(), ddp_model.parameters()): + if not param.requires_grad: + continue + if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1) or sync_each_batch: + # Grads should be in sync + assert ( + torch.allclose(param.grad, ddp_param.grad) is True + ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" + else: + # Grads should not be in sync + assert ( + torch.allclose(param.grad, ddp_param.grad) is False + ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" + + # 
Shuffle ddp_input on each iteration + torch.manual_seed(1337 + iteration) + ddp_input = ddp_input[torch.randperm(len(ddp_input))] + GradientState._reset_state() + + +def test_gradient_accumulation_with_opt_and_scheduler( + split_batches=False, dispatch_batches=False, sync_each_batch=False +): + gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=2, sync_each_batch=sync_each_batch) + accelerator = Accelerator( + split_batches=split_batches, + dispatch_batches=dispatch_batches, + gradient_accumulation_plugin=gradient_accumulation_plugin, + ) + # Test that context manager behaves properly + model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True) + for iteration, batch in enumerate(dataloader): + ddp_input, ddp_target = batch.values() + # Gather the distributed inputs and targs for the base model + input, target = accelerator.gather((ddp_input, ddp_target)) + input, target = input.to(accelerator.device), target.to(accelerator.device) + # Perform our initial ground truth step in non "DDP" + model.train() + ddp_model.train() + step_model(model, input, target, accelerator, False) + opt.step() + + if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)) or sync_each_batch: + if split_batches: + sched.step() + else: + for _ in range(accelerator.num_processes): + sched.step() + + # Perform gradient accumulation under wrapper + with accelerator.accumulate(ddp_model): + step_model(ddp_model, ddp_input, ddp_target, accelerator) + ddp_opt.step() + ddp_sched.step() + + # Learning rates should be the same + assert ( + opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] + ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n' + did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader)) or sync_each_batch + if accelerator.num_processes > 1: + check_model_parameters( + model, + ddp_model, + did_step, + 
iteration, + rtol=1e-3, # somehow needs a relative tolerance + ) + + if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)) or sync_each_batch: + opt.zero_grad() # needs to be guarded by logic as to when we should zero grads + ddp_opt.zero_grad() + + # Shuffle ddp_input on each iteration + torch.manual_seed(1337 + iteration) + GradientState._reset_state() + + +def test_dataloader_break(): + accelerator = Accelerator() + + first_dset = RegressionDataset(length=80) + first_dataloader = DataLoader(first_dset, batch_size=16) + second_dset = RegressionDataset(length=96) + second_dataloader = DataLoader(second_dset, batch_size=16) + first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader) + assert accelerator.gradient_state.active_dataloader is None + for iteration, _ in enumerate(first_dataloader): + assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader) + if iteration < len(first_dataloader) - 1: + assert not accelerator.gradient_state.end_of_dataloader + if iteration == 1: + for batch_num, _ in enumerate(second_dataloader): + assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader) + if batch_num < len(second_dataloader) - 1: + assert not accelerator.gradient_state.end_of_dataloader + else: + assert accelerator.gradient_state.end_of_dataloader + else: + assert accelerator.gradient_state.end_of_dataloader + assert accelerator.gradient_state.active_dataloader is None + + +def main(): + accelerator = Accelerator() + state = accelerator.state + if state.local_process_index == 0: + print("**Test `accumulate` gradient accumulation with dataloader break**") + if state.distributed_type != DistributedType.XLA: + test_dataloader_break() + if state.distributed_type == DistributedType.NO: + if state.local_process_index == 0: + print("**Test NOOP `no_sync` context manager**") + test_noop_sync(accelerator) + if state.distributed_type in ( + DistributedType.MULTI_GPU, + 
DistributedType.MULTI_NPU, + DistributedType.MULTI_MLU, + DistributedType.MULTI_CPU, + ): + if state.local_process_index == 0: + print("**Test Distributed `no_sync` context manager**") + test_distributed_sync(accelerator) + if state.local_process_index == 0: + print("**Test Distributed `no_sync` context manager with multiple forwards**") + test_distributed_sync_multiple_fwd(accelerator) + if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU): + for split_batch in [True, False]: + for dispatch_batches in [True, False]: + for sync_each_batch in [True, False]: + if state.local_process_index == 0: + print( + "**Test `accumulate` gradient accumulation, ", + f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}` and `sync_each_batch={sync_each_batch}`**", + ) + test_gradient_accumulation(split_batch, dispatch_batches, sync_each_batch) + + # Currently will break on torch 2.0 +, need to investigate why + if state.local_process_index == 0: + print( + "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", + "`split_batches=False`, `dispatch_batches=False`, `sync_each_batch=False`**", + ) + test_gradient_accumulation_with_opt_and_scheduler() + if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU): + for split_batch in [True, False]: + for dispatch_batches in [True, False]: + for sync_each_batch in [True, False]: + if not split_batch and not dispatch_batches and not sync_each_batch: + continue + if state.local_process_index == 0: + print( + "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", + f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}` and `sync_each_batch={sync_each_batch}`**", + ) + test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches, sync_each_batch) + + +def _mp_fn(index): + # For xla_spawn (TPUs) + main() + + +if __name__ == "__main__": + 
main() diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/testing.py b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/testing.py new file mode 100644 index 0000000000000000000000000000000000000000..179fff4808a1c36fcdc8026a617a4cad549bd796 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/testing.py @@ -0,0 +1,605 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import asyncio +import inspect +import os +import shutil +import subprocess +import sys +import tempfile +import unittest +from contextlib import contextmanager +from functools import partial +from pathlib import Path +from typing import List, Union +from unittest import mock + +import torch + +import accelerate + +from ..state import AcceleratorState, PartialState +from ..utils import ( + gather, + is_bnb_available, + is_clearml_available, + is_comet_ml_available, + is_cuda_available, + is_datasets_available, + is_deepspeed_available, + is_dvclive_available, + is_mlu_available, + is_mps_available, + is_npu_available, + is_pandas_available, + is_pippy_available, + is_tensorboard_available, + is_timm_available, + is_torch_version, + is_torch_xla_available, + is_transformers_available, + is_wandb_available, + is_xpu_available, + str_to_bool, +) + + +def get_backend(): + if is_torch_xla_available(): + return "xla", torch.cuda.device_count(), torch.cuda.memory_allocated + elif is_cuda_available(): + return "cuda", torch.cuda.device_count(), torch.cuda.memory_allocated + elif is_mps_available(): + return "mps", 1, torch.mps.current_allocated_memory() + elif is_mlu_available(): + return "mlu", torch.mlu.device_count(), torch.mlu.memory_allocated + elif is_npu_available(): + return "npu", torch.npu.device_count(), torch.npu.memory_allocated + elif is_xpu_available(): + return "xpu", torch.xpu.device_count(), torch.xpu.memory_allocated + else: + return "cpu", 1, 0 + + +torch_device, device_count, memory_allocated_func = get_backend() + + +def get_launch_command(**kwargs) -> list: + """ + Wraps around `kwargs` to help simplify launching from `subprocess`. 
+ + Example: + ```python + # returns ['accelerate', 'launch', '--num_processes=2', '--device_count=2'] + get_launch_command(num_processes=2, device_count=2) + ``` + """ + command = ["accelerate", "launch"] + for k, v in kwargs.items(): + if isinstance(v, bool) and v: + command.append(f"--{k}") + elif v is not None: + command.append(f"--{k}={v}") + return command + + +DEFAULT_LAUNCH_COMMAND = get_launch_command(num_processes=device_count) + + +def parse_flag_from_env(key, default=False): + try: + value = os.environ[key] + except KeyError: + # KEY isn't set, default to `default`. + _value = default + else: + # KEY is set, convert it to True or False. + try: + _value = str_to_bool(value) + except ValueError: + # More values are supported, but let's keep the message simple. + raise ValueError(f"If set, {key} must be yes or no.") + return _value + + +_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False) + + +def skip(test_case): + "Decorator that skips a test unconditionally" + return unittest.skip("Test was skipped")(test_case) + + +def slow(test_case): + """ + Decorator marking a test as slow. Slow tests are skipped by default. Set the RUN_SLOW environment variable to a + truthy value to run them. + """ + return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case) + + +def require_cpu(test_case): + """ + Decorator marking a test that must be only ran on the CPU. These tests are skipped when a GPU is available. + """ + return unittest.skipUnless(torch_device == "cpu", "test requires only a CPU")(test_case) + + +def require_non_cpu(test_case): + """ + Decorator marking a test that requires a hardware accelerator backend. These tests are skipped when there are no + hardware accelerator available. + """ + return unittest.skipUnless(torch_device != "cpu", "test requires a GPU")(test_case) + + +def require_cuda(test_case): + """ + Decorator marking a test that requires CUDA. 
These tests are skipped when there are no GPU available or when + TorchXLA is available. + """ + return unittest.skipUnless(is_cuda_available() and not is_torch_xla_available(), "test requires a GPU")(test_case) + + +def require_xpu(test_case): + """ + Decorator marking a test that requires XPU. These tests are skipped when there are no XPU available. + """ + return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case) + + +def require_non_xpu(test_case): + """ + Decorator marking a test that should be skipped for XPU. + """ + return unittest.skipUnless(torch_device != "xpu", "test requires a non-XPU")(test_case) + + +def require_mlu(test_case): + """ + Decorator marking a test that requires MLU. These tests are skipped when there are no MLU available. + """ + return unittest.skipUnless(is_mlu_available(), "test require a MLU")(test_case) + + +def require_npu(test_case): + """ + Decorator marking a test that requires NPU. These tests are skipped when there are no NPU available. + """ + return unittest.skipUnless(is_npu_available(), "test require a NPU")(test_case) + + +def require_mps(test_case): + """ + Decorator marking a test that requires MPS backend. These tests are skipped when torch doesn't support `mps` + backend. + """ + return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case) + + +def require_huggingface_suite(test_case): + """ + Decorator marking a test that requires transformers and datasets. These tests are skipped when they are not. + """ + return unittest.skipUnless( + is_transformers_available() and is_datasets_available(), + "test requires the Hugging Face suite", + )(test_case) + + +def require_transformers(test_case): + """ + Decorator marking a test that requires transformers. These tests are skipped when they are not. 
+ """ + return unittest.skipUnless(is_transformers_available(), "test requires the transformers library")(test_case) + + +def require_timm(test_case): + """ + Decorator marking a test that requires transformers. These tests are skipped when they are not. + """ + return unittest.skipUnless(is_timm_available(), "test requires the timm library")(test_case) + + +def require_bnb(test_case): + """ + Decorator marking a test that requires bitsandbytes. These tests are skipped when they are not. + """ + return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case) + + +def require_tpu(test_case): + """ + Decorator marking a test that requires TPUs. These tests are skipped when there are no TPUs available. + """ + return unittest.skipUnless(is_torch_xla_available(check_is_tpu=True), "test requires TPU")(test_case) + + +def require_non_torch_xla(test_case): + """ + Decorator marking a test as requiring an environment without TorchXLA. These tests are skipped when TorchXLA is + available. + """ + return unittest.skipUnless(not is_torch_xla_available(), "test requires an env without TorchXLA")(test_case) + + +def require_single_device(test_case): + """ + Decorator marking a test that requires a single device. These tests are skipped when there is no hardware + accelerator available or number of devices is more than one. + """ + return unittest.skipUnless(torch_device != "cpu" and device_count == 1, "test requires a hardware accelerator")( + test_case + ) + + +def require_single_gpu(test_case): + """ + Decorator marking a test that requires CUDA on a single GPU. These tests are skipped when there are no GPU + available or number of GPUs is more than one. + """ + return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case) + + +def require_single_xpu(test_case): + """ + Decorator marking a test that requires CUDA on a single XPU. 
These tests are skipped when there are no XPU + available or number of xPUs is more than one. + """ + return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case) + + +def require_multi_device(test_case): + """ + Decorator marking a test that requires a multi-device setup. These tests are skipped on a machine without multiple + devices. + """ + return unittest.skipUnless(device_count > 1, "test requires multiple hardware accelerators")(test_case) + + +def require_multi_gpu(test_case): + """ + Decorator marking a test that requires a multi-GPU setup. These tests are skipped on a machine without multiple + GPUs. + """ + return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case) + + +def require_multi_xpu(test_case): + """ + Decorator marking a test that requires a multi-XPU setup. These tests are skipped on a machine without multiple + XPUs. + """ + return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case) + + +def require_deepspeed(test_case): + """ + Decorator marking a test that requires DeepSpeed installed. These tests are skipped when DeepSpeed isn't installed + """ + return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case) + + +def require_fsdp(test_case): + """ + Decorator marking a test that requires FSDP installed. These tests are skipped when FSDP isn't installed + """ + return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case) + + +def require_torch_min_version(test_case=None, version=None): + """ + Decorator marking that a test requires a particular torch version to be tested. These tests are skipped when an + installed torch version is less than the required one. 
+ """ + if test_case is None: + return partial(require_torch_min_version, version=version) + return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case) + + +def require_tensorboard(test_case): + """ + Decorator marking a test that requires tensorboard installed. These tests are skipped when tensorboard isn't + installed + """ + return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case) + + +def require_wandb(test_case): + """ + Decorator marking a test that requires wandb installed. These tests are skipped when wandb isn't installed + """ + return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case) + + +def require_comet_ml(test_case): + """ + Decorator marking a test that requires comet_ml installed. These tests are skipped when comet_ml isn't installed + """ + return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case) + + +def require_clearml(test_case): + """ + Decorator marking a test that requires clearml installed. These tests are skipped when clearml isn't installed + """ + return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case) + + +def require_dvclive(test_case): + """ + Decorator marking a test that requires dvclive installed. These tests are skipped when dvclive isn't installed + """ + return unittest.skipUnless(is_dvclive_available(), "test requires dvclive")(test_case) + + +def require_pandas(test_case): + """ + Decorator marking a test that requires pandas installed. These tests are skipped when pandas isn't installed + """ + return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case) + + +def require_pippy(test_case): + """ + Decorator marking a test that requires pippy installed. 
These tests are skipped when pippy isn't installed + """ + return unittest.skipUnless(is_pippy_available(), "test requires pippy")(test_case) + + +_atleast_one_tracker_available = ( + any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() +) + + +def require_trackers(test_case): + """ + Decorator marking that a test requires at least one tracking library installed. These tests are skipped when none + are installed + """ + return unittest.skipUnless( + _atleast_one_tracker_available, + "test requires at least one tracker to be available and for `comet_ml` to not be installed", + )(test_case) + + +class TempDirTestCase(unittest.TestCase): + """ + A TestCase class that keeps a single `tempfile.TemporaryDirectory` open for the duration of the class, wipes its + data at the start of a test, and then destroyes it at the end of the TestCase. + + Useful for when a class or API requires a single constant folder throughout it's use, such as Weights and Biases + + The temporary directory location will be stored in `self.tmpdir` + """ + + clear_on_setup = True + + @classmethod + def setUpClass(cls): + "Creates a `tempfile.TemporaryDirectory` and stores it in `cls.tmpdir`" + cls.tmpdir = Path(tempfile.mkdtemp()) + + @classmethod + def tearDownClass(cls): + "Remove `cls.tmpdir` after test suite has finished" + if os.path.exists(cls.tmpdir): + shutil.rmtree(cls.tmpdir) + + def setUp(self): + "Destroy all contents in `self.tmpdir`, but not `self.tmpdir`" + if self.clear_on_setup: + for path in self.tmpdir.glob("**/*"): + if path.is_file(): + path.unlink() + elif path.is_dir(): + shutil.rmtree(path) + + +class AccelerateTestCase(unittest.TestCase): + """ + A TestCase class that will reset the accelerator state at the end of every test. Every test that checks or utilizes + the `AcceleratorState` class should inherit from this to avoid silent failures due to state being shared between + tests. 
+ """ + + def tearDown(self): + super().tearDown() + # Reset the state of the AcceleratorState singleton. + AcceleratorState._reset_state() + PartialState._reset_state() + + +class MockingTestCase(unittest.TestCase): + """ + A TestCase class designed to dynamically add various mockers that should be used in every test, mimicking the + behavior of a class-wide mock when defining one normally will not do. + + Useful when a mock requires specific information available only initialized after `TestCase.setUpClass`, such as + setting an environment variable with that information. + + The `add_mocks` function should be ran at the end of a `TestCase`'s `setUp` function, after a call to + `super().setUp()` such as: + ```python + def setUp(self): + super().setUp() + mocks = mock.patch.dict(os.environ, {"SOME_ENV_VAR", "SOME_VALUE"}) + self.add_mocks(mocks) + ``` + """ + + def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]): + """ + Add custom mocks for tests that should be repeated on each test. Should be called during + `MockingTestCase.setUp`, after `super().setUp()`. 
+ + Args: + mocks (`mock.Mock` or list of `mock.Mock`): + Mocks that should be added to the `TestCase` after `TestCase.setUpClass` has been run + """ + self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks] + for m in self.mocks: + m.start() + self.addCleanup(m.stop) + + +def are_the_same_tensors(tensor): + state = AcceleratorState() + tensor = tensor[None].clone().to(state.device) + tensors = gather(tensor).cpu() + tensor = tensor[0].cpu() + for i in range(tensors.shape[0]): + if not torch.equal(tensors[i], tensor): + return False + return True + + +class _RunOutput: + def __init__(self, returncode, stdout, stderr): + self.returncode = returncode + self.stdout = stdout + self.stderr = stderr + + +async def _read_stream(stream, callback): + while True: + line = await stream.readline() + if line: + callback(line) + else: + break + + +async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput: + if echo: + print("\nRunning: ", " ".join(cmd)) + + p = await asyncio.create_subprocess_exec( + cmd[0], + *cmd[1:], + stdin=stdin, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + env=env, + ) + + # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe + # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait + # + # If it starts hanging, will need to switch to the following code. The problem is that no data + # will be seen until it's done and if it hangs for example there will be no debug info. 
+ # out, err = await p.communicate() + # return _RunOutput(p.returncode, out, err) + + out = [] + err = [] + + def tee(line, sink, pipe, label=""): + line = line.decode("utf-8").rstrip() + sink.append(line) + if not quiet: + print(label, line, file=pipe) + + # XXX: the timeout doesn't seem to make any difference here + await asyncio.wait( + [ + asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))), + asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))), + ], + timeout=timeout, + ) + return _RunOutput(await p.wait(), out, err) + + +def execute_subprocess_async(cmd: list, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput: + # Cast every path in `cmd` to a string + for i, c in enumerate(cmd): + if isinstance(c, Path): + cmd[i] = str(c) + loop = asyncio.get_event_loop() + result = loop.run_until_complete( + _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo) + ) + + cmd_str = " ".join(cmd) + if result.returncode > 0: + stderr = "\n".join(result.stderr) + raise RuntimeError( + f"'{cmd_str}' failed with returncode {result.returncode}\n\n" + f"The combined stderr from workers follows:\n{stderr}" + ) + + return result + + +class SubprocessCallException(Exception): + pass + + +def run_command(command: List[str], return_stdout=False, env=None): + """ + Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. 
Will also properly capture + if an error occured while running `command` + """ + # Cast every path in `command` to a string + for i, c in enumerate(command): + if isinstance(c, Path): + command[i] = str(c) + if env is None: + env = os.environ.copy() + try: + output = subprocess.check_output(command, stderr=subprocess.STDOUT, env=env) + if return_stdout: + if hasattr(output, "decode"): + output = output.decode("utf-8") + return output + except subprocess.CalledProcessError as e: + raise SubprocessCallException( + f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}" + ) from e + + +def path_in_accelerate_package(*components: str) -> Path: + """ + Get a path within the `accelerate` package's directory. + + Args: + *components: Components of the path to join after the package directory. + + Returns: + `Path`: The path to the requested file or directory. + """ + + accelerate_package_dir = Path(inspect.getfile(accelerate)).parent + return accelerate_package_dir.joinpath(*components) + + +@contextmanager +def assert_exception(exception_class: Exception, msg: str = None) -> bool: + """ + Context manager to assert that the right `Exception` class was raised. + + If `msg` is provided, will check that the message is contained in the raised exception. 
+ """ + was_ran = False + try: + yield + was_ran = True + except Exception as e: + assert isinstance(e, exception_class), f"Expected exception of type {exception_class} but got {type(e)}" + if msg is not None: + assert msg in str(e), f"Expected message '{msg}' to be in exception but got '{str(e)}'" + if was_ran: + raise AssertionError(f"Expected exception of type {exception_class} but ran without issue.") diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/training.py b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/training.py new file mode 100644 index 0000000000000000000000000000000000000000..d89cfd3c71546871d00cb9c2a5cd07494c46cbfe --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/training.py @@ -0,0 +1,101 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import numpy as np +import torch +from torch.utils.data import DataLoader + +from accelerate.utils.dataclasses import DistributedType + + +class RegressionDataset: + def __init__(self, a=2, b=3, length=64, seed=None): + rng = np.random.default_rng(seed) + self.length = length + self.x = rng.normal(size=(length,)).astype(np.float32) + self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32) + + def __len__(self): + return self.length + + def __getitem__(self, i): + return {"x": self.x[i], "y": self.y[i]} + + +class RegressionModel4XPU(torch.nn.Module): + def __init__(self, a=0, b=0, double_output=False): + super().__init__() + self.a = torch.nn.Parameter(torch.tensor([2, 3]).float()) + self.b = torch.nn.Parameter(torch.tensor([2, 3]).float()) + self.first_batch = True + + def forward(self, x=None): + if self.first_batch: + print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}") + self.first_batch = False + return x * self.a[0] + self.b[0] + + +class RegressionModel(torch.nn.Module): + def __init__(self, a=0, b=0, double_output=False): + super().__init__() + self.a = torch.nn.Parameter(torch.tensor(a).float()) + self.b = torch.nn.Parameter(torch.tensor(b).float()) + self.first_batch = True + + def forward(self, x=None): + if self.first_batch: + print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. 
Input dtype: {x.dtype}") + self.first_batch = False + return x * self.a + self.b + + +def mocked_dataloaders(accelerator, batch_size: int = 16): + from datasets import load_dataset + from transformers import AutoTokenizer + + tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"} + datasets = load_dataset("csv", data_files=data_files) + label_list = datasets["train"].unique("label") + + label_to_id = {v: i for i, v in enumerate(label_list)} + + def tokenize_function(examples): + # max_length=None => use the model max length (it's actually the default) + outputs = tokenizer( + examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length" + ) + if "label" in examples: + outputs["labels"] = [label_to_id[l] for l in examples["label"]] + return outputs + + # Apply the method we just defined to all the examples in all the splits of the dataset + tokenized_datasets = datasets.map( + tokenize_function, + batched=True, + remove_columns=["sentence1", "sentence2", "label"], + ) + + def collate_fn(examples): + # On TPU it's best to pad everything to the same length or training will be very slow. + if accelerator.distributed_type == DistributedType.XLA: + return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") + return tokenizer.pad(examples, padding="longest", return_tensors="pt") + + # Instantiate dataloaders. 
+ train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2) + eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1) + + return train_dataloader, eval_dataloader diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/tracking.py b/env-llmeval/lib/python3.10/site-packages/accelerate/tracking.py new file mode 100644 index 0000000000000000000000000000000000000000..5efba19bc6769d9c70ea8b17b8da784b908f529f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/accelerate/tracking.py @@ -0,0 +1,1023 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Expectation: +# Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`} + +import json +import os +import time +from functools import wraps +from typing import Any, Dict, List, Optional, Union + +import yaml + +from .logging import get_logger +from .state import PartialState +from .utils import ( + LoggerType, + is_aim_available, + is_clearml_available, + is_comet_ml_available, + is_dvclive_available, + is_mlflow_available, + is_tensorboard_available, + is_wandb_available, + listify, +) + + +_available_trackers = [] + +if is_tensorboard_available(): + _available_trackers.append(LoggerType.TENSORBOARD) + +if is_wandb_available(): + _available_trackers.append(LoggerType.WANDB) + +if is_comet_ml_available(): + _available_trackers.append(LoggerType.COMETML) + +if is_aim_available(): + _available_trackers.append(LoggerType.AIM) + +if is_mlflow_available(): + _available_trackers.append(LoggerType.MLFLOW) + +if is_clearml_available(): + _available_trackers.append(LoggerType.CLEARML) + +if is_dvclive_available(): + _available_trackers.append(LoggerType.DVCLIVE) + +logger = get_logger(__name__) + + +def on_main_process(function): + """ + Decorator to selectively run the decorated function on the main process only based on the `main_process_only` + attribute in a class. + + Checks at function execution rather than initialization time, not triggering the initialization of the + `PartialState`. + """ + + @wraps(function) + def execute_on_main_process(self, *args, **kwargs): + if getattr(self, "main_process_only", False): + return PartialState().on_main_process(function)(self, *args, **kwargs) + else: + return function(self, *args, **kwargs) + + return execute_on_main_process + + +def get_available_trackers(): + "Returns a list of all supported available trackers in the system" + return _available_trackers + + +class GeneralTracker: + """ + A base Tracker class to be used for all logging integration implementations. 
+ + Each function should take in `**kwargs` that will automatically be passed in from a base dictionary provided to + [`Accelerator`]. + + Should implement `name`, `requires_logging_directory`, and `tracker` properties such that: + + `name` (`str`): String representation of the tracker class name, such as "TensorBoard" `requires_logging_directory` + (`bool`): Whether the logger requires a directory to store their logs. `tracker` (`object`): Should return internal + tracking mechanism used by a tracker class (such as the `run` for wandb) + + Implementations can also include a `main_process_only` (`bool`) attribute to toggle if relevent logging, init, and + other functions should occur on the main process or across all processes (by default will use `True`) + """ + + main_process_only = True + + def __init__(self, _blank=False): + if not _blank: + err = "" + if not hasattr(self, "name"): + err += "`name`" + if not hasattr(self, "requires_logging_directory"): + if len(err) > 0: + err += ", " + err += "`requires_logging_directory`" + + # as tracker is a @property that relies on post-init + if "tracker" not in dir(self): + if len(err) > 0: + err += ", " + err += "`tracker`" + if len(err) > 0: + raise NotImplementedError( + f"The implementation for this tracker class is missing the following " + f"required attributes. Please define them in the class definition: " + f"{err}" + ) + + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Implementations should use the experiment configuration + functionality of a tracking API. + + Args: + values (Dictionary `str` to `bool`, `str`, `float` or `int`): + Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`, + `str`, `float`, `int`, or `None`. + """ + pass + + def log(self, values: dict, step: Optional[int], **kwargs): + """ + Logs `values` to the current run. 
Base `log` implementations of a tracking API should go in here, along with + special behavior for the `step parameter. + + Args: + values (Dictionary `str` to `str`, `float`, or `int`): + Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + """ + pass + + def finish(self): + """ + Should run any finalizing functions within the tracking API. If the API should not have one, just don't + overwrite that method. + """ + pass + + +class TensorBoardTracker(GeneralTracker): + """ + A `Tracker` class that supports `tensorboard`. Should be initialized at the start of your script. + + Args: + run_name (`str`): + The name of the experiment run + logging_dir (`str`, `os.PathLike`): + Location for TensorBoard logs to be stored. + **kwargs (additional keyword arguments, *optional*): + Additional key word arguments passed along to the `tensorboard.SummaryWriter.__init__` method. + """ + + name = "tensorboard" + requires_logging_directory = True + + @on_main_process + def __init__(self, run_name: str, logging_dir: Union[str, os.PathLike], **kwargs): + try: + from torch.utils import tensorboard + except ModuleNotFoundError: + import tensorboardX as tensorboard + super().__init__() + self.run_name = run_name + self.logging_dir = os.path.join(logging_dir, run_name) + self.writer = tensorboard.SummaryWriter(self.logging_dir, **kwargs) + logger.debug(f"Initialized TensorBoard project {self.run_name} logging to {self.logging_dir}") + logger.debug( + "Make sure to log any initial configurations with `self.store_init_configuration` before training!" + ) + + @property + def tracker(self): + return self.writer + + @on_main_process + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. Stores the + hyperparameters in a yaml file for future use. 
+ + Args: + values (Dictionary `str` to `bool`, `str`, `float` or `int`): + Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`, + `str`, `float`, `int`, or `None`. + """ + self.writer.add_hparams(values, metric_dict={}) + self.writer.flush() + project_run_name = time.time() + dir_name = os.path.join(self.logging_dir, str(project_run_name)) + os.makedirs(dir_name, exist_ok=True) + with open(os.path.join(dir_name, "hparams.yml"), "w") as outfile: + try: + yaml.dump(values, outfile) + except yaml.representer.RepresenterError: + logger.error("Serialization to store hyperparameters failed") + raise + logger.debug("Stored initial configuration hyperparameters to TensorBoard and hparams yaml file") + + @on_main_process + def log(self, values: dict, step: Optional[int] = None, **kwargs): + """ + Logs `values` to the current run. + + Args: + values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`): + Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of + `str` to `float`/`int`. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to either `SummaryWriter.add_scaler`, + `SummaryWriter.add_text`, or `SummaryWriter.add_scalers` method based on the contents of `values`. + """ + values = listify(values) + for k, v in values.items(): + if isinstance(v, (int, float)): + self.writer.add_scalar(k, v, global_step=step, **kwargs) + elif isinstance(v, str): + self.writer.add_text(k, v, global_step=step, **kwargs) + elif isinstance(v, dict): + self.writer.add_scalars(k, v, global_step=step, **kwargs) + self.writer.flush() + logger.debug("Successfully logged to TensorBoard") + + @on_main_process + def log_images(self, values: dict, step: Optional[int], **kwargs): + """ + Logs `images` to the current run. 
+ + Args: + values (Dictionary `str` to `List` of `np.ndarray` or `PIL.Image`): + Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to the `SummaryWriter.add_image` method. + """ + for k, v in values.items(): + self.writer.add_images(k, v, global_step=step, **kwargs) + logger.debug("Successfully logged images to TensorBoard") + + @on_main_process + def finish(self): + """ + Closes `TensorBoard` writer + """ + self.writer.close() + logger.debug("TensorBoard writer closed") + + +class WandBTracker(GeneralTracker): + """ + A `Tracker` class that supports `wandb`. Should be initialized at the start of your script. + + Args: + run_name (`str`): + The name of the experiment run. + **kwargs (additional keyword arguments, *optional*): + Additional key word arguments passed along to the `wandb.init` method. + """ + + name = "wandb" + requires_logging_directory = False + main_process_only = False + + @on_main_process + def __init__(self, run_name: str, **kwargs): + super().__init__() + self.run_name = run_name + + import wandb + + self.run = wandb.init(project=self.run_name, **kwargs) + logger.debug(f"Initialized WandB project {self.run_name}") + logger.debug( + "Make sure to log any initial configurations with `self.store_init_configuration` before training!" + ) + + @property + def tracker(self): + return self.run + + @on_main_process + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. + + Args: + values (Dictionary `str` to `bool`, `str`, `float` or `int`): + Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`, + `str`, `float`, `int`, or `None`. 
+ """ + import wandb + + wandb.config.update(values, allow_val_change=True) + logger.debug("Stored initial configuration hyperparameters to WandB") + + @on_main_process + def log(self, values: dict, step: Optional[int] = None, **kwargs): + """ + Logs `values` to the current run. + + Args: + values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`): + Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of + `str` to `float`/`int`. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to the `wandb.log` method. + """ + self.run.log(values, step=step, **kwargs) + logger.debug("Successfully logged to WandB") + + @on_main_process + def log_images(self, values: dict, step: Optional[int] = None, **kwargs): + """ + Logs `images` to the current run. + + Args: + values (Dictionary `str` to `List` of `np.ndarray` or `PIL.Image`): + Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to the `wandb.log` method. + """ + import wandb + + for k, v in values.items(): + self.log({k: [wandb.Image(image) for image in v]}, step=step, **kwargs) + logger.debug("Successfully logged images to WandB") + + @on_main_process + def log_table( + self, + table_name: str, + columns: List[str] = None, + data: List[List[Any]] = None, + dataframe: Any = None, + step: Optional[int] = None, + **kwargs, + ): + """ + Log a Table containing any object type (text, image, audio, video, molecule, html, etc). Can be defined either + with `columns` and `data` or with `dataframe`. 
+ + Args: + table_name (`str`): + The name to give to the logged table on the wandb workspace + columns (list of `str`, *optional*): + The name of the columns on the table + data (List of List of Any data type, *optional*): + The data to be logged in the table + dataframe (Any data type, *optional*): + The data to be logged in the table + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + """ + import wandb + + values = {table_name: wandb.Table(columns=columns, data=data, dataframe=dataframe)} + self.log(values, step=step, **kwargs) + + @on_main_process + def finish(self): + """ + Closes `wandb` writer + """ + self.run.finish() + logger.debug("WandB run closed") + + +class CometMLTracker(GeneralTracker): + """ + A `Tracker` class that supports `comet_ml`. Should be initialized at the start of your script. + + API keys must be stored in a Comet config file. + + Args: + run_name (`str`): + The name of the experiment run. + **kwargs (additional keyword arguments, *optional*): + Additional key word arguments passed along to the `Experiment.__init__` method. + """ + + name = "comet_ml" + requires_logging_directory = False + + @on_main_process + def __init__(self, run_name: str, **kwargs): + super().__init__() + self.run_name = run_name + + from comet_ml import Experiment + + self.writer = Experiment(project_name=run_name, **kwargs) + logger.debug(f"Initialized CometML project {self.run_name}") + logger.debug( + "Make sure to log any initial configurations with `self.store_init_configuration` before training!" + ) + + @property + def tracker(self): + return self.writer + + @on_main_process + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. + + Args: + values (Dictionary `str` to `bool`, `str`, `float` or `int`): + Values to be stored as initial hyperparameters as key-value pairs. 
The values need to have type `bool`, + `str`, `float`, `int`, or `None`. + """ + self.writer.log_parameters(values) + logger.debug("Stored initial configuration hyperparameters to CometML") + + @on_main_process + def log(self, values: dict, step: Optional[int] = None, **kwargs): + """ + Logs `values` to the current run. + + Args: + values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`): + Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of + `str` to `float`/`int`. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to either `Experiment.log_metric`, `Experiment.log_other`, + or `Experiment.log_metrics` method based on the contents of `values`. + """ + if step is not None: + self.writer.set_step(step) + for k, v in values.items(): + if isinstance(v, (int, float)): + self.writer.log_metric(k, v, step=step, **kwargs) + elif isinstance(v, str): + self.writer.log_other(k, v, **kwargs) + elif isinstance(v, dict): + self.writer.log_metrics(v, step=step, **kwargs) + logger.debug("Successfully logged to CometML") + + @on_main_process + def finish(self): + """ + Closes `comet-ml` writer + """ + self.writer.end() + logger.debug("CometML run closed") + + +class AimTracker(GeneralTracker): + """ + A `Tracker` class that supports `aim`. Should be initialized at the start of your script. + + Args: + run_name (`str`): + The name of the experiment run. + **kwargs (additional keyword arguments, *optional*): + Additional key word arguments passed along to the `Run.__init__` method. 
+ """ + + name = "aim" + requires_logging_directory = True + + @on_main_process + def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = ".", **kwargs): + self.run_name = run_name + + from aim import Run + + self.writer = Run(repo=logging_dir, **kwargs) + self.writer.name = self.run_name + logger.debug(f"Initialized Aim project {self.run_name}") + logger.debug( + "Make sure to log any initial configurations with `self.store_init_configuration` before training!" + ) + + @property + def tracker(self): + return self.writer + + @on_main_process + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. + + Args: + values (`dict`): + Values to be stored as initial hyperparameters as key-value pairs. + """ + self.writer["hparams"] = values + + @on_main_process + def log(self, values: dict, step: Optional[int], **kwargs): + """ + Logs `values` to the current run. + + Args: + values (`dict`): + Values to be logged as key-value pairs. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to the `Run.track` method. + """ + # Note: replace this with the dictionary support when merged + for key, value in values.items(): + self.writer.track(value, name=key, step=step, **kwargs) + + @on_main_process + def log_images(self, values: dict, step: Optional[int] = None, kwargs: Optional[Dict[str, dict]] = None): + """ + Logs `images` to the current run. + + Args: + values (`Dict[str, Union[np.ndarray, PIL.Image, Tuple[np.ndarray, str], Tuple[PIL.Image, str]]]`): + Values to be logged as key-value pairs. The values need to have type `np.ndarray` or PIL.Image. If a + tuple is provided, the first element should be the image and the second element should be the caption. + step (`int`, *optional*): + The run step. 
If included, the log will be affiliated with this step. + kwargs (`Dict[str, dict]`): + Additional key word arguments passed along to the `Run.Image` and `Run.track` method specified by the + keys `aim_image` and `track`, respectively. + """ + import aim + + aim_image_kw = {} + track_kw = {} + + if kwargs is not None: + aim_image_kw = kwargs.get("aim_image", {}) + track_kw = kwargs.get("track", {}) + + for key, value in values.items(): + if isinstance(value, tuple): + img, caption = value + else: + img, caption = value, "" + aim_image = aim.Image(img, caption=caption, **aim_image_kw) + self.writer.track(aim_image, name=key, step=step, **track_kw) + + @on_main_process + def finish(self): + """ + Closes `aim` writer + """ + self.writer.close() + + +class MLflowTracker(GeneralTracker): + """ + A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script. + + Args: + experiment_name (`str`, *optional*): + Name of the experiment. Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument. + logging_dir (`str` or `os.PathLike`, defaults to `"."`): + Location for mlflow logs to be stored. + run_id (`str`, *optional*): + If specified, get the run with the specified UUID and log parameters and metrics under that run. The run’s + end time is unset and its status is set to running, but the run’s other attributes (source_version, + source_type, etc.) are not changed. Environment variable MLFLOW_RUN_ID has priority over this argument. + tags (`Dict[str, str]`, *optional*): + An optional `dict` of `str` keys and values, or a `str` dump from a `dict`, to set as tags on the run. If a + run is being resumed, these tags are set on the resumed run. If a new run is being created, these tags are + set on the new run. Environment variable MLFLOW_TAGS has priority over this argument. + nested_run (`bool`, *optional*, defaults to `False`): + Controls whether run is nested in parent run. True creates a nested run. 
Environment variable + MLFLOW_NESTED_RUN has priority over this argument. + run_name (`str`, *optional*): + Name of new run (stored as a mlflow.runName tag). Used only when `run_id` is unspecified. + description (`str`, *optional*): + An optional string that populates the description box of the run. If a run is being resumed, the + description is set on the resumed run. If a new run is being created, the description is set on the new + run. + """ + + name = "mlflow" + requires_logging_directory = False + + @on_main_process + def __init__( + self, + experiment_name: str = None, + logging_dir: Optional[Union[str, os.PathLike]] = None, + run_id: Optional[str] = None, + tags: Optional[Union[Dict[str, Any], str]] = None, + nested_run: Optional[bool] = False, + run_name: Optional[str] = None, + description: Optional[str] = None, + ): + experiment_name = os.environ.get("MLFLOW_EXPERIMENT_NAME", experiment_name) + run_id = os.environ.get("MLFLOW_RUN_ID", run_id) + tags = os.environ.get("MLFLOW_TAGS", tags) + if isinstance(tags, str): + tags = json.loads(tags) + + nested_run = os.environ.get("MLFLOW_NESTED_RUN", nested_run) + + import mlflow + + exps = mlflow.search_experiments(filter_string=f"name = '{experiment_name}'") + if len(exps) > 0: + if len(exps) > 1: + logger.warning("Multiple experiments with the same name found. Using first one.") + experiment_id = exps[0].experiment_id + else: + experiment_id = mlflow.create_experiment( + name=experiment_name, + artifact_location=logging_dir, + tags=tags, + ) + + self.active_run = mlflow.start_run( + run_id=run_id, + experiment_id=experiment_id, + run_name=run_name, + nested=nested_run, + tags=tags, + description=description, + ) + + logger.debug(f"Initialized mlflow experiment {experiment_name}") + logger.debug( + "Make sure to log any initial configurations with `self.store_init_configuration` before training!" 
+ ) + + @property + def tracker(self): + return self.active_run + + @on_main_process + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. + + Args: + values (`dict`): + Values to be stored as initial hyperparameters as key-value pairs. + """ + import mlflow + + for name, value in list(values.items()): + # internally, all values are converted to str in MLflow + if len(str(value)) > mlflow.utils.validation.MAX_PARAM_VAL_LENGTH: + logger.warning_once( + f'Accelerate is attempting to log a value of "{value}" for key "{name}" as a parameter. MLflow\'s' + f" log_param() only accepts values no longer than {mlflow.utils.validation.MAX_PARAM_VAL_LENGTH} characters so we dropped this attribute." + ) + del values[name] + + values_list = list(values.items()) + + # MLflow cannot log more than 100 values in one go, so we have to split it + for i in range(0, len(values_list), mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH): + mlflow.log_params(dict(values_list[i : i + mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH])) + + logger.debug("Stored initial configuration hyperparameters to MLflow") + + @on_main_process + def log(self, values: dict, step: Optional[int]): + """ + Logs `values` to the current run. + + Args: + values (`dict`): + Values to be logged as key-value pairs. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + """ + metrics = {} + for k, v in values.items(): + if isinstance(v, (int, float)): + metrics[k] = v + else: + logger.warning_once( + f'MLflowTracker is attempting to log a value of "{v}" of type {type(v)} for key "{k}" as a metric. ' + "MLflow's log_metric() only accepts float and int types so we dropped this attribute." 
+ ) + import mlflow + + mlflow.log_metrics(metrics, step=step) + logger.debug("Successfully logged to mlflow") + + @on_main_process + def finish(self): + """ + End the active MLflow run. + """ + import mlflow + + mlflow.end_run() + + +class ClearMLTracker(GeneralTracker): + """ + A `Tracker` class that supports `clearml`. Should be initialized at the start of your script. + + Args: + run_name (`str`, *optional*): + Name of the experiment. Environment variables `CLEARML_PROJECT` and `CLEARML_TASK` have priority over this + argument. + **kwargs (additional keyword arguments, *optional*): + Kwargs passed along to the `Task.__init__` method. + """ + + name = "clearml" + requires_logging_directory = False + + @on_main_process + def __init__(self, run_name: str = None, **kwargs): + from clearml import Task + + current_task = Task.current_task() + self._initialized_externally = False + if current_task: + self._initialized_externally = True + self.task = current_task + return + + kwargs.setdefault("project_name", os.environ.get("CLEARML_PROJECT", run_name)) + kwargs.setdefault("task_name", os.environ.get("CLEARML_TASK", run_name)) + self.task = Task.init(**kwargs) + + @property + def tracker(self): + return self.task + + @on_main_process + def store_init_configuration(self, values: dict): + """ + Connect configuration dictionary to the Task object. Should be run at the beginning of your experiment. + + Args: + values (`dict`): + Values to be stored as initial hyperparameters as key-value pairs. + """ + return self.task.connect_configuration(values) + + @on_main_process + def log(self, values: Dict[str, Union[int, float]], step: Optional[int] = None, **kwargs): + """ + Logs `values` dictionary to the current run. The dictionary keys must be strings. The dictionary values must be + ints or floats + + Args: + values (`Dict[str, Union[int, float]]`): + Values to be logged as key-value pairs. 
If the key starts with 'eval_'/'test_'/'train_', the value will + be reported under the 'eval'/'test'/'train' series and the respective prefix will be removed. + Otherwise, the value will be reported under the 'train' series, and no prefix will be removed. + step (`int`, *optional*): + If specified, the values will be reported as scalars, with the iteration number equal to `step`. + Otherwise they will be reported as single values. + kwargs: + Additional key word arguments passed along to the `clearml.Logger.report_single_value` or + `clearml.Logger.report_scalar` methods. + """ + clearml_logger = self.task.get_logger() + for k, v in values.items(): + if not isinstance(v, (int, float)): + logger.warning_once( + "Accelerator is attempting to log a value of " + f'"{v}" of type {type(v)} for key "{k}" as a scalar. ' + "This invocation of ClearML logger's report_scalar() " + "is incorrect so we dropped this attribute." + ) + continue + if step is None: + clearml_logger.report_single_value(name=k, value=v, **kwargs) + continue + title, series = ClearMLTracker._get_title_series(k) + clearml_logger.report_scalar(title=title, series=series, value=v, iteration=step, **kwargs) + + @on_main_process + def log_images(self, values: dict, step: Optional[int] = None, **kwargs): + """ + Logs `images` to the current run. + + Args: + values (`Dict[str, List[Union[np.ndarray, PIL.Image]]`): + Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to the `clearml.Logger.report_image` method. 
+ """ + clearml_logger = self.task.get_logger() + for k, v in values.items(): + title, series = ClearMLTracker._get_title_series(k) + clearml_logger.report_image(title=title, series=series, iteration=step, image=v, **kwargs) + + @on_main_process + def log_table( + self, + table_name: str, + columns: List[str] = None, + data: List[List[Any]] = None, + dataframe: Any = None, + step: Optional[int] = None, + **kwargs, + ): + """ + Log a Table to the task. Can be defined eitherwith `columns` and `data` or with `dataframe`. + + Args: + table_name (`str`): + The name of the table + columns (list of `str`, *optional*): + The name of the columns on the table + data (List of List of Any data type, *optional*): + The data to be logged in the table. If `columns` is not specified, then the first entry in data will be + the name of the columns of the table + dataframe (Any data type, *optional*): + The data to be logged in the table + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to the `clearml.Logger.report_table` method. + """ + to_report = dataframe + if dataframe is None: + if data is None: + raise ValueError( + "`ClearMLTracker.log_table` requires that `data` to be supplied if `dataframe` is `None`" + ) + to_report = [columns] + data if columns else data + title, series = ClearMLTracker._get_title_series(table_name) + self.task.get_logger().report_table(title=title, series=series, table_plot=to_report, iteration=step, **kwargs) + + @on_main_process + def finish(self): + """ + Close the ClearML task. If the task was initialized externally (e.g. 
by manually calling `Task.init`), this + function is a noop + """ + if self.task and not self._initialized_externally: + self.task.close() + + @staticmethod + def _get_title_series(name): + for prefix in ["eval", "test", "train"]: + if name.startswith(prefix + "_"): + return name[len(prefix) + 1 :], prefix + return name, "train" + + +class DVCLiveTracker(GeneralTracker): + """ + A `Tracker` class that supports `dvclive`. Should be initialized at the start of your script. + + Args: + run_name (`str`, *optional*): + Ignored for dvclive. See `kwargs` instead. + kwargs: + Additional key word arguments passed along to [`dvclive.Live()`](https://dvc.org/doc/dvclive/live). + + Example: + + ```py + from accelerate import Accelerator + + accelerator = Accelerator(log_with="dvclive") + accelerator.init_trackers(project_name="my_project", init_kwargs={"dvclive": {"dir": "my_directory"}}) + ``` + """ + + name = "dvclive" + requires_logging_directory = False + + @on_main_process + def __init__(self, run_name: Optional[str] = None, live: Optional[Any] = None, **kwargs): + from dvclive import Live + + super().__init__() + self.live = live if live is not None else Live(**kwargs) + + @property + def tracker(self): + return self.live + + @on_main_process + def store_init_configuration(self, values: dict): + """ + Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. Stores the + hyperparameters in a yaml file for future use. + + Args: + values (Dictionary `str` to `bool`, `str`, `float`, `int`, or a List or Dict of those types): + Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`, + `str`, `float`, or `int`. + """ + self.live.log_params(values) + + @on_main_process + def log(self, values: dict, step: Optional[int] = None, **kwargs): + """ + Logs `values` to the current run. + + Args: + values (Dictionary `str` to `str`, `float`, or `int`): + Values to be logged as key-value pairs. 
The values need to have type `str`, `float`, or `int`. + step (`int`, *optional*): + The run step. If included, the log will be affiliated with this step. + kwargs: + Additional key word arguments passed along to `dvclive.Live.log_metric()`. + """ + from dvclive.plots import Metric + + if step is not None: + self.live.step = step + for k, v in values.items(): + if Metric.could_log(v): + self.live.log_metric(k, v, **kwargs) + else: + logger.warning_once( + "Accelerator attempted to log a value of " + f'"{v}" of type {type(v)} for key "{k}" as a scalar. ' + "This invocation of DVCLive's Live.log_metric() " + "is incorrect so we dropped this attribute." + ) + self.live.next_step() + + @on_main_process + def finish(self): + """ + Closes `dvclive.Live()`. + """ + self.live.end() + + +LOGGER_TYPE_TO_CLASS = { + "aim": AimTracker, + "comet_ml": CometMLTracker, + "mlflow": MLflowTracker, + "tensorboard": TensorBoardTracker, + "wandb": WandBTracker, + "clearml": ClearMLTracker, + "dvclive": DVCLiveTracker, +} + + +def filter_trackers( + log_with: List[Union[str, LoggerType, GeneralTracker]], + logging_dir: Union[str, os.PathLike] = None, +): + """ + Takes in a list of potential tracker types and checks that: + - The tracker wanted is available in that environment + - Filters out repeats of tracker types + - If `all` is in `log_with`, will return all trackers in the environment + - If a tracker requires a `logging_dir`, ensures that `logging_dir` is not `None` + + Args: + log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*): + A list of loggers to be setup for experiment tracking. Should be one or several of: + + - `"all"` + - `"tensorboard"` + - `"wandb"` + - `"comet_ml"` + - `"mlflow"` + - `"dvclive"` + If `"all"` is selected, will pick up all available trackers in the environment and initialize them. Can + also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `"all"`. 
+ logging_dir (`str`, `os.PathLike`, *optional*): + A path to a directory for storing logs of locally-compatible loggers. + """ + loggers = [] + if log_with is not None: + if not isinstance(log_with, (list, tuple)): + log_with = [log_with] + if "all" in log_with or LoggerType.ALL in log_with: + loggers = [o for o in log_with if issubclass(type(o), GeneralTracker)] + get_available_trackers() + else: + for log_type in log_with: + if log_type not in LoggerType and not issubclass(type(log_type), GeneralTracker): + raise ValueError(f"Unsupported logging capability: {log_type}. Choose between {LoggerType.list()}") + if issubclass(type(log_type), GeneralTracker): + loggers.append(log_type) + else: + log_type = LoggerType(log_type) + if log_type not in loggers: + if log_type in get_available_trackers(): + tracker_init = LOGGER_TYPE_TO_CLASS[str(log_type)] + if tracker_init.requires_logging_directory: + if logging_dir is None: + raise ValueError( + f"Logging with `{log_type}` requires a `logging_dir` to be passed in." + ) + loggers.append(log_type) + else: + logger.debug(f"Tried adding logger {log_type}, but package is unavailable in the system.") + + return loggers diff --git a/env-llmeval/lib/python3.10/site-packages/isympy.py b/env-llmeval/lib/python3.10/site-packages/isympy.py new file mode 100644 index 0000000000000000000000000000000000000000..50e9bc78d08904b8c177105ee90d984ea4b01d20 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/isympy.py @@ -0,0 +1,342 @@ +""" +Python shell for SymPy. 
+ +This is just a normal Python shell (IPython shell if you have the +IPython package installed), that executes the following commands for +the user: + + >>> from __future__ import division + >>> from sympy import * + >>> x, y, z, t = symbols('x y z t') + >>> k, m, n = symbols('k m n', integer=True) + >>> f, g, h = symbols('f g h', cls=Function) + >>> init_printing() + +So starting 'isympy' is equivalent to starting Python (or IPython) and +executing the above commands by hand. It is intended for easy and quick +experimentation with SymPy. isympy is a good way to use SymPy as an +interactive calculator. If you have IPython and Matplotlib installed, then +interactive plotting is enabled by default. + +COMMAND LINE OPTIONS +-------------------- + +-c CONSOLE, --console=CONSOLE + + Use the specified shell (Python or IPython) shell as the console + backend instead of the default one (IPython if present, Python + otherwise), e.g.: + + $isympy -c python + + CONSOLE must be one of 'ipython' or 'python' + +-p PRETTY, --pretty PRETTY + + Setup pretty-printing in SymPy. When pretty-printing is enabled, + expressions can be printed with Unicode or ASCII. The default is + to use pretty-printing (with Unicode if the terminal supports it). + When this option is 'no', expressions will not be pretty-printed + and ASCII will be used: + + $isympy -p no + + PRETTY must be one of 'unicode', 'ascii', or 'no' + +-t TYPES, --types=TYPES + + Setup the ground types for the polys. By default, gmpy ground types + are used if gmpy2 or gmpy is installed, otherwise it falls back to python + ground types, which are a little bit slower. You can manually + choose python ground types even if gmpy is installed (e.g., for + testing purposes): + + $isympy -t python + + TYPES must be one of 'gmpy', 'gmpy1' or 'python' + + Note that the ground type gmpy1 is primarily intended for testing; it + forces the use of gmpy version 1 even if gmpy2 is available. 
+ + This is the same as setting the environment variable + SYMPY_GROUND_TYPES to the given ground type (e.g., + SYMPY_GROUND_TYPES='gmpy') + + The ground types can be determined interactively from the variable + sympy.polys.domains.GROUND_TYPES. + +-o ORDER, --order ORDER + + Setup the ordering of terms for printing. The default is lex, which + orders terms lexicographically (e.g., x**2 + x + 1). You can choose + other orderings, such as rev-lex, which will use reverse + lexicographic ordering (e.g., 1 + x + x**2): + + $isympy -o rev-lex + + ORDER must be one of 'lex', 'rev-lex', 'grlex', 'rev-grlex', + 'grevlex', 'rev-grevlex', 'old', or 'none'. + + Note that for very large expressions, ORDER='none' may speed up + printing considerably but the terms will have no canonical order. + +-q, --quiet + + Print only Python's and SymPy's versions to stdout at startup. + +-d, --doctest + + Use the same format that should be used for doctests. This is + equivalent to -c python -p no. + +-C, --no-cache + + Disable the caching mechanism. Disabling the cache may slow certain + operations down considerably. This is useful for testing the cache, + or for benchmarking, as the cache can result in deceptive timings. + + This is equivalent to setting the environment variable + SYMPY_USE_CACHE to 'no'. + +-a, --auto-symbols (requires at least IPython 0.11) + + Automatically create missing symbols. Normally, typing a name of a + Symbol that has not been instantiated first would raise NameError, + but with this option enabled, any undefined name will be + automatically created as a Symbol. + + Note that this is intended only for interactive, calculator style + usage. In a script that uses SymPy, Symbols should be instantiated + at the top, so that it's clear what they are. + + This will not override any names that are already defined, which + includes the single character letters represented by the mnemonic + QCOSINE (see the "Gotchas and Pitfalls" document in the + documentation). 
You can delete existing names by executing "del + name". If a name is defined, typing "'name' in dir()" will return True. + + The Symbols that are created using this have default assumptions. + If you want to place assumptions on symbols, you should create them + using symbols() or var(). + + Finally, this only works in the top level namespace. So, for + example, if you define a function in isympy with an undefined + Symbol, it will not work. + + See also the -i and -I options. + +-i, --int-to-Integer (requires at least IPython 0.11) + + Automatically wrap int literals with Integer. This makes it so that + things like 1/2 will come out as Rational(1, 2), rather than 0.5. This + works by preprocessing the source and wrapping all int literals with + Integer. Note that this will not change the behavior of int literals + assigned to variables, and it also won't change the behavior of functions + that return int literals. + + If you want an int, you can wrap the literal in int(), e.g. int(3)/int(2) + gives 1.5 (with division imported from __future__). + +-I, --interactive (requires at least IPython 0.11) + + This is equivalent to --auto-symbols --int-to-Integer. Future options + designed for ease of interactive use may be added to this. + +-D, --debug + + Enable debugging output. This is the same as setting the + environment variable SYMPY_DEBUG to 'True'. The debug status is set + in the variable SYMPY_DEBUG within isympy. + +-- IPython options + + Additionally you can pass command line options directly to the IPython + interpreter (the standard Python shell is not supported). However you + need to add the '--' separator between two types of options, e.g the + startup banner option and the colors option. You need to enter the + options as required by the version of IPython that you are using, too: + + in IPython 0.11, + + $isympy -q -- --colors=NoColor + + or older versions of IPython, + + $isympy -q -- -colors NoColor + +See also isympy --help. 
+""" + +import os +import sys + +# DO NOT IMPORT SYMPY HERE! Or the setting of the sympy environment variables +# by the command line will break. + +def main() -> None: + from argparse import ArgumentParser, RawDescriptionHelpFormatter + + VERSION = None + if '--version' in sys.argv: + # We cannot import sympy before this is run, because flags like -C and + # -t set environment variables that must be set before SymPy is + # imported. The only thing we need to import it for is to get the + # version, which only matters with the --version flag. + import sympy + VERSION = sympy.__version__ + + usage = 'isympy [options] -- [ipython options]' + parser = ArgumentParser( + usage=usage, + description=__doc__, + formatter_class=RawDescriptionHelpFormatter, + ) + + parser.add_argument('--version', action='version', version=VERSION) + + parser.add_argument( + '-c', '--console', + dest='console', + action='store', + default=None, + choices=['ipython', 'python'], + metavar='CONSOLE', + help='select type of interactive session: ipython | python; defaults ' + 'to ipython if IPython is installed, otherwise python') + + parser.add_argument( + '-p', '--pretty', + dest='pretty', + action='store', + default=None, + metavar='PRETTY', + choices=['unicode', 'ascii', 'no'], + help='setup pretty printing: unicode | ascii | no; defaults to ' + 'unicode printing if the terminal supports it, otherwise ascii') + + parser.add_argument( + '-t', '--types', + dest='types', + action='store', + default=None, + metavar='TYPES', + choices=['gmpy', 'gmpy1', 'python'], + help='setup ground types: gmpy | gmpy1 | python; defaults to gmpy if gmpy2 ' + 'or gmpy is installed, otherwise python') + + parser.add_argument( + '-o', '--order', + dest='order', + action='store', + default=None, + metavar='ORDER', + choices=['lex', 'grlex', 'grevlex', 'rev-lex', 'rev-grlex', 'rev-grevlex', 'old', 'none'], + help='setup ordering of terms: [rev-]lex | [rev-]grlex | [rev-]grevlex | old | none; defaults to lex') + + 
parser.add_argument( + '-q', '--quiet', + dest='quiet', + action='store_true', + default=False, + help='print only version information at startup') + + parser.add_argument( + '-d', '--doctest', + dest='doctest', + action='store_true', + default=False, + help='use the doctest format for output (you can just copy and paste it)') + + parser.add_argument( + '-C', '--no-cache', + dest='cache', + action='store_false', + default=True, + help='disable caching mechanism') + + parser.add_argument( + '-a', '--auto-symbols', + dest='auto_symbols', + action='store_true', + default=False, + help='automatically construct missing symbols') + + parser.add_argument( + '-i', '--int-to-Integer', + dest='auto_int_to_Integer', + action='store_true', + default=False, + help="automatically wrap int literals with Integer") + + parser.add_argument( + '-I', '--interactive', + dest='interactive', + action='store_true', + default=False, + help="equivalent to -a -i") + + parser.add_argument( + '-D', '--debug', + dest='debug', + action='store_true', + default=False, + help='enable debugging output') + + (options, ipy_args) = parser.parse_known_args() + if '--' in ipy_args: + ipy_args.remove('--') + + if not options.cache: + os.environ['SYMPY_USE_CACHE'] = 'no' + + if options.types: + os.environ['SYMPY_GROUND_TYPES'] = options.types + + if options.debug: + os.environ['SYMPY_DEBUG'] = str(options.debug) + + if options.doctest: + options.pretty = 'no' + options.console = 'python' + + session = options.console + + if session is not None: + ipython = session == 'ipython' + else: + try: + import IPython + ipython = True + except ImportError: + if not options.quiet: + from sympy.interactive.session import no_ipython + print(no_ipython) + ipython = False + + args = { + 'pretty_print': True, + 'use_unicode': None, + 'use_latex': None, + 'order': None, + 'argv': ipy_args, + } + + if options.pretty == 'unicode': + args['use_unicode'] = True + elif options.pretty == 'ascii': + args['use_unicode'] = False + 
elif options.pretty == 'no': + args['pretty_print'] = False + + if options.order is not None: + args['order'] = options.order + + args['quiet'] = options.quiet + args['auto_symbols'] = options.auto_symbols or options.interactive + args['auto_int_to_Integer'] = options.auto_int_to_Integer or options.interactive + + from sympy.interactive import init_session + init_session(ipython, **args) + +if __name__ == "__main__": + main() diff --git a/env-llmeval/lib/python3.10/site-packages/six.py b/env-llmeval/lib/python3.10/site-packages/six.py new file mode 100644 index 0000000000000000000000000000000000000000..4e15675d8b5caa33255fe37271700f587bd26671 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/six.py @@ -0,0 +1,998 @@ +# Copyright (c) 2010-2020 Benjamin Peterson +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +"""Utilities for writing code that runs on Python 2 and 3""" + +from __future__ import absolute_import + +import functools +import itertools +import operator +import sys +import types + +__author__ = "Benjamin Peterson " +__version__ = "1.16.0" + + +# Useful for very coarse version differentiation. +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 +PY34 = sys.version_info[0:2] >= (3, 4) + +if PY3: + string_types = str, + integer_types = int, + class_types = type, + text_type = str + binary_type = bytes + + MAXSIZE = sys.maxsize +else: + string_types = basestring, + integer_types = (int, long) + class_types = (type, types.ClassType) + text_type = unicode + binary_type = str + + if sys.platform.startswith("java"): + # Jython always uses 32 bits. + MAXSIZE = int((1 << 31) - 1) + else: + # It's possible to have sizeof(long) != sizeof(Py_ssize_t). + class X(object): + + def __len__(self): + return 1 << 31 + try: + len(X()) + except OverflowError: + # 32-bit + MAXSIZE = int((1 << 31) - 1) + else: + # 64-bit + MAXSIZE = int((1 << 63) - 1) + del X + +if PY34: + from importlib.util import spec_from_loader +else: + spec_from_loader = None + + +def _add_doc(func, doc): + """Add documentation to a function.""" + func.__doc__ = doc + + +def _import_module(name): + """Import module, returning the module after the last dot.""" + __import__(name) + return sys.modules[name] + + +class _LazyDescr(object): + + def __init__(self, name): + self.name = name + + def __get__(self, obj, tp): + result = self._resolve() + setattr(obj, self.name, result) # Invokes __set__. + try: + # This is a bit ugly, but it avoids running this again by + # removing this descriptor. 
+ delattr(obj.__class__, self.name) + except AttributeError: + pass + return result + + +class MovedModule(_LazyDescr): + + def __init__(self, name, old, new=None): + super(MovedModule, self).__init__(name) + if PY3: + if new is None: + new = name + self.mod = new + else: + self.mod = old + + def _resolve(self): + return _import_module(self.mod) + + def __getattr__(self, attr): + _module = self._resolve() + value = getattr(_module, attr) + setattr(self, attr, value) + return value + + +class _LazyModule(types.ModuleType): + + def __init__(self, name): + super(_LazyModule, self).__init__(name) + self.__doc__ = self.__class__.__doc__ + + def __dir__(self): + attrs = ["__doc__", "__name__"] + attrs += [attr.name for attr in self._moved_attributes] + return attrs + + # Subclasses should override this + _moved_attributes = [] + + +class MovedAttribute(_LazyDescr): + + def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): + super(MovedAttribute, self).__init__(name) + if PY3: + if new_mod is None: + new_mod = name + self.mod = new_mod + if new_attr is None: + if old_attr is None: + new_attr = name + else: + new_attr = old_attr + self.attr = new_attr + else: + self.mod = old_mod + if old_attr is None: + old_attr = name + self.attr = old_attr + + def _resolve(self): + module = _import_module(self.mod) + return getattr(module, self.attr) + + +class _SixMetaPathImporter(object): + + """ + A meta path importer to import six.moves and its submodules. + + This class implements a PEP302 finder and loader. It should be compatible + with Python 2.5 and all existing versions of Python3 + """ + + def __init__(self, six_module_name): + self.name = six_module_name + self.known_modules = {} + + def _add_module(self, mod, *fullnames): + for fullname in fullnames: + self.known_modules[self.name + "." + fullname] = mod + + def _get_module(self, fullname): + return self.known_modules[self.name + "." 
+ fullname] + + def find_module(self, fullname, path=None): + if fullname in self.known_modules: + return self + return None + + def find_spec(self, fullname, path, target=None): + if fullname in self.known_modules: + return spec_from_loader(fullname, self) + return None + + def __get_module(self, fullname): + try: + return self.known_modules[fullname] + except KeyError: + raise ImportError("This loader does not know module " + fullname) + + def load_module(self, fullname): + try: + # in case of a reload + return sys.modules[fullname] + except KeyError: + pass + mod = self.__get_module(fullname) + if isinstance(mod, MovedModule): + mod = mod._resolve() + else: + mod.__loader__ = self + sys.modules[fullname] = mod + return mod + + def is_package(self, fullname): + """ + Return true, if the named module is a package. + + We need this method to get correct spec objects with + Python 3.4 (see PEP451) + """ + return hasattr(self.__get_module(fullname), "__path__") + + def get_code(self, fullname): + """Return None + + Required, if is_package is implemented""" + self.__get_module(fullname) # eventually raises ImportError + return None + get_source = get_code # same as get_code + + def create_module(self, spec): + return self.load_module(spec.name) + + def exec_module(self, module): + pass + +_importer = _SixMetaPathImporter(__name__) + + +class _MovedItems(_LazyModule): + + """Lazy loading of moved objects""" + __path__ = [] # mark as package + + +_moved_attributes = [ + MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), + MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), + MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), + MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), + MovedAttribute("intern", "__builtin__", "sys"), + MovedAttribute("map", "itertools", "builtins", "imap", "map"), + MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), + MovedAttribute("getcwdb", "os", 
"os", "getcwd", "getcwdb"), + MovedAttribute("getoutput", "commands", "subprocess"), + MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), + MovedAttribute("reduce", "__builtin__", "functools"), + MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), + MovedAttribute("StringIO", "StringIO", "io"), + MovedAttribute("UserDict", "UserDict", "collections"), + MovedAttribute("UserList", "UserList", "collections"), + MovedAttribute("UserString", "UserString", "collections"), + MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), + MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), + MovedModule("builtins", "__builtin__"), + MovedModule("configparser", "ConfigParser"), + MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"), + MovedModule("copyreg", "copy_reg"), + MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), + MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"), + MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"), + MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), + MovedModule("http_cookies", "Cookie", "http.cookies"), + MovedModule("html_entities", "htmlentitydefs", "html.entities"), + MovedModule("html_parser", "HTMLParser", "html.parser"), + MovedModule("http_client", "httplib", "http.client"), + MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), + MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"), + MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), + MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), + MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), + MovedModule("BaseHTTPServer", 
"BaseHTTPServer", "http.server"), + MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), + MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), + MovedModule("cPickle", "cPickle", "pickle"), + MovedModule("queue", "Queue"), + MovedModule("reprlib", "repr"), + MovedModule("socketserver", "SocketServer"), + MovedModule("_thread", "thread", "_thread"), + MovedModule("tkinter", "Tkinter"), + MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), + MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), + MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), + MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), + MovedModule("tkinter_tix", "Tix", "tkinter.tix"), + MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), + MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), + MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), + MovedModule("tkinter_colorchooser", "tkColorChooser", + "tkinter.colorchooser"), + MovedModule("tkinter_commondialog", "tkCommonDialog", + "tkinter.commondialog"), + MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), + MovedModule("tkinter_font", "tkFont", "tkinter.font"), + MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), + MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", + "tkinter.simpledialog"), + MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), + MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), + MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), + MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), + MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), + MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), +] +# Add windows specific modules. 
+if sys.platform == "win32": + _moved_attributes += [ + MovedModule("winreg", "_winreg"), + ] + +for attr in _moved_attributes: + setattr(_MovedItems, attr.name, attr) + if isinstance(attr, MovedModule): + _importer._add_module(attr, "moves." + attr.name) +del attr + +_MovedItems._moved_attributes = _moved_attributes + +moves = _MovedItems(__name__ + ".moves") +_importer._add_module(moves, "moves") + + +class Module_six_moves_urllib_parse(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_parse""" + + +_urllib_parse_moved_attributes = [ + MovedAttribute("ParseResult", "urlparse", "urllib.parse"), + MovedAttribute("SplitResult", "urlparse", "urllib.parse"), + MovedAttribute("parse_qs", "urlparse", "urllib.parse"), + MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), + MovedAttribute("urldefrag", "urlparse", "urllib.parse"), + MovedAttribute("urljoin", "urlparse", "urllib.parse"), + MovedAttribute("urlparse", "urlparse", "urllib.parse"), + MovedAttribute("urlsplit", "urlparse", "urllib.parse"), + MovedAttribute("urlunparse", "urlparse", "urllib.parse"), + MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), + MovedAttribute("quote", "urllib", "urllib.parse"), + MovedAttribute("quote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote", "urllib", "urllib.parse"), + MovedAttribute("unquote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"), + MovedAttribute("urlencode", "urllib", "urllib.parse"), + MovedAttribute("splitquery", "urllib", "urllib.parse"), + MovedAttribute("splittag", "urllib", "urllib.parse"), + MovedAttribute("splituser", "urllib", "urllib.parse"), + MovedAttribute("splitvalue", "urllib", "urllib.parse"), + MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), + MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), + MovedAttribute("uses_params", "urlparse", "urllib.parse"), + MovedAttribute("uses_query", "urlparse", 
"urllib.parse"), + MovedAttribute("uses_relative", "urlparse", "urllib.parse"), +] +for attr in _urllib_parse_moved_attributes: + setattr(Module_six_moves_urllib_parse, attr.name, attr) +del attr + +Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes + +_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), + "moves.urllib_parse", "moves.urllib.parse") + + +class Module_six_moves_urllib_error(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_error""" + + +_urllib_error_moved_attributes = [ + MovedAttribute("URLError", "urllib2", "urllib.error"), + MovedAttribute("HTTPError", "urllib2", "urllib.error"), + MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), +] +for attr in _urllib_error_moved_attributes: + setattr(Module_six_moves_urllib_error, attr.name, attr) +del attr + +Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes + +_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), + "moves.urllib_error", "moves.urllib.error") + + +class Module_six_moves_urllib_request(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_request""" + + +_urllib_request_moved_attributes = [ + MovedAttribute("urlopen", "urllib2", "urllib.request"), + MovedAttribute("install_opener", "urllib2", "urllib.request"), + MovedAttribute("build_opener", "urllib2", "urllib.request"), + MovedAttribute("pathname2url", "urllib", "urllib.request"), + MovedAttribute("url2pathname", "urllib", "urllib.request"), + MovedAttribute("getproxies", "urllib", "urllib.request"), + MovedAttribute("Request", "urllib2", "urllib.request"), + MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), + MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), + 
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), + MovedAttribute("BaseHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), + MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), + MovedAttribute("FileHandler", "urllib2", "urllib.request"), + MovedAttribute("FTPHandler", "urllib2", "urllib.request"), + MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), + MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), + MovedAttribute("urlretrieve", "urllib", "urllib.request"), + MovedAttribute("urlcleanup", "urllib", "urllib.request"), + MovedAttribute("URLopener", "urllib", "urllib.request"), + MovedAttribute("FancyURLopener", "urllib", "urllib.request"), + MovedAttribute("proxy_bypass", "urllib", "urllib.request"), + MovedAttribute("parse_http_list", "urllib2", "urllib.request"), + MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"), +] +for attr in _urllib_request_moved_attributes: + setattr(Module_six_moves_urllib_request, attr.name, attr) +del attr + +Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes + +_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), + "moves.urllib_request", "moves.urllib.request") + + +class 
Module_six_moves_urllib_response(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_response""" + + +_urllib_response_moved_attributes = [ + MovedAttribute("addbase", "urllib", "urllib.response"), + MovedAttribute("addclosehook", "urllib", "urllib.response"), + MovedAttribute("addinfo", "urllib", "urllib.response"), + MovedAttribute("addinfourl", "urllib", "urllib.response"), +] +for attr in _urllib_response_moved_attributes: + setattr(Module_six_moves_urllib_response, attr.name, attr) +del attr + +Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes + +_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), + "moves.urllib_response", "moves.urllib.response") + + +class Module_six_moves_urllib_robotparser(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_robotparser""" + + +_urllib_robotparser_moved_attributes = [ + MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), +] +for attr in _urllib_robotparser_moved_attributes: + setattr(Module_six_moves_urllib_robotparser, attr.name, attr) +del attr + +Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes + +_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), + "moves.urllib_robotparser", "moves.urllib.robotparser") + + +class Module_six_moves_urllib(types.ModuleType): + + """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" + __path__ = [] # mark as package + parse = _importer._get_module("moves.urllib_parse") + error = _importer._get_module("moves.urllib_error") + request = _importer._get_module("moves.urllib_request") + response = _importer._get_module("moves.urllib_response") + robotparser = _importer._get_module("moves.urllib_robotparser") + + def __dir__(self): + return ['parse', 'error', 'request', 'response', 'robotparser'] + 
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), + "moves.urllib") + + +def add_move(move): + """Add an item to six.moves.""" + setattr(_MovedItems, move.name, move) + + +def remove_move(name): + """Remove item from six.moves.""" + try: + delattr(_MovedItems, name) + except AttributeError: + try: + del moves.__dict__[name] + except KeyError: + raise AttributeError("no such move, %r" % (name,)) + + +if PY3: + _meth_func = "__func__" + _meth_self = "__self__" + + _func_closure = "__closure__" + _func_code = "__code__" + _func_defaults = "__defaults__" + _func_globals = "__globals__" +else: + _meth_func = "im_func" + _meth_self = "im_self" + + _func_closure = "func_closure" + _func_code = "func_code" + _func_defaults = "func_defaults" + _func_globals = "func_globals" + + +try: + advance_iterator = next +except NameError: + def advance_iterator(it): + return it.next() +next = advance_iterator + + +try: + callable = callable +except NameError: + def callable(obj): + return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) + + +if PY3: + def get_unbound_function(unbound): + return unbound + + create_bound_method = types.MethodType + + def create_unbound_method(func, cls): + return func + + Iterator = object +else: + def get_unbound_function(unbound): + return unbound.im_func + + def create_bound_method(func, obj): + return types.MethodType(func, obj, obj.__class__) + + def create_unbound_method(func, cls): + return types.MethodType(func, None, cls) + + class Iterator(object): + + def next(self): + return type(self).__next__(self) + + callable = callable +_add_doc(get_unbound_function, + """Get the function out of a possibly unbound function""") + + +get_method_function = operator.attrgetter(_meth_func) +get_method_self = operator.attrgetter(_meth_self) +get_function_closure = operator.attrgetter(_func_closure) +get_function_code = operator.attrgetter(_func_code) +get_function_defaults = operator.attrgetter(_func_defaults) 
+get_function_globals = operator.attrgetter(_func_globals) + + +if PY3: + def iterkeys(d, **kw): + return iter(d.keys(**kw)) + + def itervalues(d, **kw): + return iter(d.values(**kw)) + + def iteritems(d, **kw): + return iter(d.items(**kw)) + + def iterlists(d, **kw): + return iter(d.lists(**kw)) + + viewkeys = operator.methodcaller("keys") + + viewvalues = operator.methodcaller("values") + + viewitems = operator.methodcaller("items") +else: + def iterkeys(d, **kw): + return d.iterkeys(**kw) + + def itervalues(d, **kw): + return d.itervalues(**kw) + + def iteritems(d, **kw): + return d.iteritems(**kw) + + def iterlists(d, **kw): + return d.iterlists(**kw) + + viewkeys = operator.methodcaller("viewkeys") + + viewvalues = operator.methodcaller("viewvalues") + + viewitems = operator.methodcaller("viewitems") + +_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") +_add_doc(itervalues, "Return an iterator over the values of a dictionary.") +_add_doc(iteritems, + "Return an iterator over the (key, value) pairs of a dictionary.") +_add_doc(iterlists, + "Return an iterator over the (key, [values]) pairs of a dictionary.") + + +if PY3: + def b(s): + return s.encode("latin-1") + + def u(s): + return s + unichr = chr + import struct + int2byte = struct.Struct(">B").pack + del struct + byte2int = operator.itemgetter(0) + indexbytes = operator.getitem + iterbytes = iter + import io + StringIO = io.StringIO + BytesIO = io.BytesIO + del io + _assertCountEqual = "assertCountEqual" + if sys.version_info[1] <= 1: + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" + _assertNotRegex = "assertNotRegexpMatches" + else: + _assertRaisesRegex = "assertRaisesRegex" + _assertRegex = "assertRegex" + _assertNotRegex = "assertNotRegex" +else: + def b(s): + return s + # Workaround for standalone backslash + + def u(s): + return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") + unichr = unichr + int2byte = chr + + def byte2int(bs): + return 
ord(bs[0]) + + def indexbytes(buf, i): + return ord(buf[i]) + iterbytes = functools.partial(itertools.imap, ord) + import StringIO + StringIO = BytesIO = StringIO.StringIO + _assertCountEqual = "assertItemsEqual" + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" + _assertNotRegex = "assertNotRegexpMatches" +_add_doc(b, """Byte literal""") +_add_doc(u, """Text literal""") + + +def assertCountEqual(self, *args, **kwargs): + return getattr(self, _assertCountEqual)(*args, **kwargs) + + +def assertRaisesRegex(self, *args, **kwargs): + return getattr(self, _assertRaisesRegex)(*args, **kwargs) + + +def assertRegex(self, *args, **kwargs): + return getattr(self, _assertRegex)(*args, **kwargs) + + +def assertNotRegex(self, *args, **kwargs): + return getattr(self, _assertNotRegex)(*args, **kwargs) + + +if PY3: + exec_ = getattr(moves.builtins, "exec") + + def reraise(tp, value, tb=None): + try: + if value is None: + value = tp() + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + finally: + value = None + tb = None + +else: + def exec_(_code_, _globs_=None, _locs_=None): + """Execute code in a namespace.""" + if _globs_ is None: + frame = sys._getframe(1) + _globs_ = frame.f_globals + if _locs_ is None: + _locs_ = frame.f_locals + del frame + elif _locs_ is None: + _locs_ = _globs_ + exec("""exec _code_ in _globs_, _locs_""") + + exec_("""def reraise(tp, value, tb=None): + try: + raise tp, value, tb + finally: + tb = None +""") + + +if sys.version_info[:2] > (3,): + exec_("""def raise_from(value, from_value): + try: + raise value from from_value + finally: + value = None +""") +else: + def raise_from(value, from_value): + raise value + + +print_ = getattr(moves.builtins, "print", None) +if print_ is None: + def print_(*args, **kwargs): + """The new-style print function for Python 2.4 and 2.5.""" + fp = kwargs.pop("file", sys.stdout) + if fp is None: + return + + def write(data): + if not isinstance(data, 
basestring): + data = str(data) + # If the file has an encoding, encode unicode with it. + if (isinstance(fp, file) and + isinstance(data, unicode) and + fp.encoding is not None): + errors = getattr(fp, "errors", None) + if errors is None: + errors = "strict" + data = data.encode(fp.encoding, errors) + fp.write(data) + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: + if isinstance(sep, unicode): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", None) + if end is not None: + if isinstance(end, unicode): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") + if kwargs: + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: + if isinstance(arg, unicode): + want_unicode = True + break + if want_unicode: + newline = unicode("\n") + space = unicode(" ") + else: + newline = "\n" + space = " " + if sep is None: + sep = space + if end is None: + end = newline + for i, arg in enumerate(args): + if i: + write(sep) + write(arg) + write(end) +if sys.version_info[:2] < (3, 3): + _print = print_ + + def print_(*args, **kwargs): + fp = kwargs.get("file", sys.stdout) + flush = kwargs.pop("flush", False) + _print(*args, **kwargs) + if flush and fp is not None: + fp.flush() + +_add_doc(reraise, """Reraise an exception.""") + +if sys.version_info[0:2] < (3, 4): + # This does exactly the same what the :func:`py3:functools.update_wrapper` + # function does on Python versions after 3.2. It sets the ``__wrapped__`` + # attribute on ``wrapper`` object and it doesn't raise an error if any of + # the attributes mentioned in ``assigned`` and ``updated`` are missing on + # ``wrapped`` object. 
+ def _update_wrapper(wrapper, wrapped, + assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES): + for attr in assigned: + try: + value = getattr(wrapped, attr) + except AttributeError: + continue + else: + setattr(wrapper, attr, value) + for attr in updated: + getattr(wrapper, attr).update(getattr(wrapped, attr, {})) + wrapper.__wrapped__ = wrapped + return wrapper + _update_wrapper.__doc__ = functools.update_wrapper.__doc__ + + def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES): + return functools.partial(_update_wrapper, wrapped=wrapped, + assigned=assigned, updated=updated) + wraps.__doc__ = functools.wraps.__doc__ + +else: + wraps = functools.wraps + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass.""" + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. + class metaclass(type): + + def __new__(cls, name, this_bases, d): + if sys.version_info[:2] >= (3, 7): + # This version introduced PEP 560 that requires a bit + # of extra care (we mimic what is done by __build_class__). 
+ resolved_bases = types.resolve_bases(bases) + if resolved_bases is not bases: + d['__orig_bases__'] = bases + else: + resolved_bases = bases + return meta(name, resolved_bases, d) + + @classmethod + def __prepare__(cls, name, this_bases): + return meta.__prepare__(name, bases) + return type.__new__(metaclass, 'temporary_class', (), {}) + + +def add_metaclass(metaclass): + """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): + orig_vars = cls.__dict__.copy() + slots = orig_vars.get('__slots__') + if slots is not None: + if isinstance(slots, str): + slots = [slots] + for slots_var in slots: + orig_vars.pop(slots_var) + orig_vars.pop('__dict__', None) + orig_vars.pop('__weakref__', None) + if hasattr(cls, '__qualname__'): + orig_vars['__qualname__'] = cls.__qualname__ + return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper + + +def ensure_binary(s, encoding='utf-8', errors='strict'): + """Coerce **s** to six.binary_type. + + For Python 2: + - `unicode` -> encoded to `str` + - `str` -> `str` + + For Python 3: + - `str` -> encoded to `bytes` + - `bytes` -> `bytes` + """ + if isinstance(s, binary_type): + return s + if isinstance(s, text_type): + return s.encode(encoding, errors) + raise TypeError("not expecting type '%s'" % type(s)) + + +def ensure_str(s, encoding='utf-8', errors='strict'): + """Coerce *s* to `str`. + + For Python 2: + - `unicode` -> encoded to `str` + - `str` -> `str` + + For Python 3: + - `str` -> `str` + - `bytes` -> decoded to `str` + """ + # Optimization: Fast return for the common case. + if type(s) is str: + return s + if PY2 and isinstance(s, text_type): + return s.encode(encoding, errors) + elif PY3 and isinstance(s, binary_type): + return s.decode(encoding, errors) + elif not isinstance(s, (text_type, binary_type)): + raise TypeError("not expecting type '%s'" % type(s)) + return s + + +def ensure_text(s, encoding='utf-8', errors='strict'): + """Coerce *s* to six.text_type. 
+ + For Python 2: + - `unicode` -> `unicode` + - `str` -> `unicode` + + For Python 3: + - `str` -> `str` + - `bytes` -> decoded to `str` + """ + if isinstance(s, binary_type): + return s.decode(encoding, errors) + elif isinstance(s, text_type): + return s + else: + raise TypeError("not expecting type '%s'" % type(s)) + + +def python_2_unicode_compatible(klass): + """ + A class decorator that defines __unicode__ and __str__ methods under Python 2. + Under Python 3 it does nothing. + + To support Python 2 and 3 with a single code base, define a __str__ method + returning text and apply this decorator to the class. + """ + if PY2: + if '__str__' not in klass.__dict__: + raise ValueError("@python_2_unicode_compatible cannot be applied " + "to %s because it doesn't define __str__()." % + klass.__name__) + klass.__unicode__ = klass.__str__ + klass.__str__ = lambda self: self.__unicode__().encode('utf-8') + return klass + + +# Complete the moves implementation. +# This code is at the end of this module to speed up module loading. +# Turn this module into a package. +__path__ = [] # required for PEP 302 and PEP 451 +__package__ = __name__ # see PEP 366 @ReservedAssignment +if globals().get("__spec__") is not None: + __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable +# Remove other six meta path importers, since they cause problems. This can +# happen if six is removed from sys.modules and then reloaded. (Setuptools does +# this for some reason.) +if sys.meta_path: + for i, importer in enumerate(sys.meta_path): + # Here's some real nastiness: Another "instance" of the six module might + # be floating around. Therefore, we can't use isinstance() to check for + # the six meta path importer, since the other six instance will have + # inserted an importer with different class. 
+ if (type(importer).__name__ == "_SixMetaPathImporter" and + importer.name == __name__): + del sys.meta_path[i] + break + del i, importer +# Finally, add the importer to the meta path import hook. +sys.meta_path.append(_importer) diff --git a/env-llmeval/lib/python3.10/site-packages/sqlitedict.py b/env-llmeval/lib/python3.10/site-packages/sqlitedict.py new file mode 100644 index 0000000000000000000000000000000000000000..3e65724a2f1d32c8105f7ccae7eafe54f6c2ba5d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/sqlitedict.py @@ -0,0 +1,697 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# This code is distributed under the terms and conditions +# from the Apache License, Version 2.0 +# +# http://opensource.org/licenses/apache2.0.php +# +# This code was inspired by: +# * http://code.activestate.com/recipes/576638-draft-for-an-sqlite3-based-dbm/ +# * http://code.activestate.com/recipes/526618/ + +""" +A lightweight wrapper around Python's sqlite3 database, with a dict-like interface +and multi-thread access support:: + +>>> mydict = SqliteDict('some.db', autocommit=True) # the mapping will be persisted to file `some.db` +>>> mydict['some_key'] = any_picklable_object +>>> print mydict['some_key'] +>>> print len(mydict) # etc... all dict functions work + +Pickle is used internally to serialize the values. Keys are strings. + +If you don't use autocommit (default is no autocommit for performance), then +don't forget to call `mydict.commit()` when done with a transaction. 
+ +""" + +import sqlite3 +import os +import sys +import tempfile +import threading +import logging +import traceback +from base64 import b64decode, b64encode +import weakref + +__version__ = '2.1.0' + + +def reraise(tp, value, tb=None): + if value is None: + value = tp() + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + + +try: + from cPickle import dumps, loads, HIGHEST_PROTOCOL as PICKLE_PROTOCOL +except ImportError: + from pickle import dumps, loads, HIGHEST_PROTOCOL as PICKLE_PROTOCOL + +# some Python 3 vs 2 imports +try: + from collections import UserDict as DictClass +except ImportError: + from UserDict import DictMixin as DictClass + +try: + from queue import Queue +except ImportError: + from Queue import Queue + + +logger = logging.getLogger(__name__) + +# +# There's a thread that holds the actual SQL connection (SqliteMultithread). +# We communicate with this thread via queues (request and responses). +# The requests can either be SQL commands or one of the "special" commands +# below: +# +# _REQUEST_CLOSE: request that the SQL connection be closed +# _REQUEST_COMMIT: request that any changes be committed to the DB +# +# Responses are either SQL records (e.g. results of a SELECT) or the magic +# _RESPONSE_NO_MORE command, which indicates nothing else will ever be written +# to the response queue. +# +_REQUEST_CLOSE = '--close--' +_REQUEST_COMMIT = '--commit--' +_RESPONSE_NO_MORE = '--no more--' + +# +# We work with weak references for better memory efficiency. +# Dereferencing, checking the referent queue still exists, and putting to it +# is boring and repetitive, so we have a _put function to handle it for us. 
+# +_PUT_OK, _PUT_REFERENT_DESTROYED, _PUT_NOOP = 0, 1, 2 + + +def _put(queue_reference, item): + if queue_reference is not None: + queue = queue_reference() + if queue is None: + # + # We got a reference to a queue, but that queue no longer exists + # + retval = _PUT_REFERENT_DESTROYED + else: + queue.put(item) + retval = _PUT_OK + + del queue + return retval + + # + # We didn't get a reference to a queue, so do nothing (no-op). + # + return _PUT_NOOP + + +def open(*args, **kwargs): + """See documentation of the SqliteDict class.""" + return SqliteDict(*args, **kwargs) + + +def encode(obj): + """Serialize an object using pickle to a binary format accepted by SQLite.""" + return sqlite3.Binary(dumps(obj, protocol=PICKLE_PROTOCOL)) + + +def decode(obj): + """Deserialize objects retrieved from SQLite.""" + return loads(bytes(obj)) + + +def encode_key(key): + """Serialize a key using pickle + base64 encoding to text accepted by SQLite.""" + return b64encode(dumps(key, protocol=PICKLE_PROTOCOL)).decode("ascii") + + +def decode_key(key): + """Deserialize a key retrieved from SQLite.""" + return loads(b64decode(key.encode("ascii"))) + + +def identity(obj): + """Identity f(x) = x function for encoding/decoding.""" + return obj + + +class SqliteDict(DictClass): + VALID_FLAGS = ['c', 'r', 'w', 'n'] + + def __init__(self, filename=None, tablename='unnamed', flag='c', + autocommit=False, journal_mode="DELETE", encode=encode, + decode=decode, encode_key=identity, decode_key=identity, + timeout=5, outer_stack=True): + """ + Initialize a thread-safe sqlite-backed dictionary. The dictionary will + be a table `tablename` in database file `filename`. A single file (=database) + may contain multiple tables. + + If no `filename` is given, a random file in temp will be used (and deleted + from temp once the dict is closed/deleted). + + If you enable `autocommit`, changes will be committed after each operation + (more inefficient but safer). 
Otherwise, changes are committed on `self.commit()`, + `self.clear()` and `self.close()`. + + Set `journal_mode` to 'OFF' if you're experiencing sqlite I/O problems + or if you need performance and don't care about crash-consistency. + + Set `outer_stack` to False to disable the output of the outer exception + to the error logs. This may improve the efficiency of sqlitedict + operation at the expense of a detailed exception trace. + + The `flag` parameter. Exactly one of: + 'c': default mode, open for read/write, creating the db/table if necessary. + 'w': open for r/w, but drop `tablename` contents first (start with empty table) + 'r': open as read-only + 'n': create a new database (erasing any existing tables, not just `tablename`!). + + The `encode` and `decode` parameters are used to customize how the values + are serialized and deserialized. + The `encode` parameter must be a function that takes a single Python + object and returns a serialized representation. + The `decode` function must be a function that takes the serialized + representation produced by `encode` and returns a deserialized Python + object. + The default is to use pickle. + + The `timeout` defines the maximum time (in seconds) to wait for initial Thread startup. + + """ + self.in_temp = filename is None + if self.in_temp: + fd, filename = tempfile.mkstemp(prefix='sqldict') + os.close(fd) + + if flag not in SqliteDict.VALID_FLAGS: + raise RuntimeError("Unrecognized flag: %s" % flag) + self.flag = flag + + if flag == 'n': + if os.path.exists(filename): + os.remove(filename) + + dirname = os.path.dirname(filename) + if dirname: + if not os.path.exists(dirname): + raise RuntimeError('Error! The directory does not exist, %s' % dirname) + + self.filename = filename + + # Use standard SQL escaping of double quote characters in identifiers, by doubling them. 
+ # See https://github.com/RaRe-Technologies/sqlitedict/pull/113 + self.tablename = tablename.replace('"', '""') + + self.autocommit = autocommit + self.journal_mode = journal_mode + self.encode = encode + self.decode = decode + self.encode_key = encode_key + self.decode_key = decode_key + self._outer_stack = outer_stack + + logger.debug("opening Sqlite table %r in %r" % (tablename, filename)) + self.conn = self._new_conn() + if self.flag == 'r': + if self.tablename not in SqliteDict.get_tablenames(self.filename): + msg = 'Refusing to create a new table "%s" in read-only DB mode' % tablename + raise RuntimeError(msg) + else: + MAKE_TABLE = 'CREATE TABLE IF NOT EXISTS "%s" (key TEXT PRIMARY KEY, value BLOB)' % self.tablename + self.conn.execute(MAKE_TABLE) + self.conn.commit() + if flag == 'w': + self.clear() + + def _new_conn(self): + return SqliteMultithread( + self.filename, + autocommit=self.autocommit, + journal_mode=self.journal_mode, + outer_stack=self._outer_stack, + ) + + def __enter__(self): + if not hasattr(self, 'conn') or self.conn is None: + self.conn = self._new_conn() + return self + + def __exit__(self, *exc_info): + self.close() + + def __str__(self): + return "SqliteDict(%s)" % (self.filename) + + def __repr__(self): + return str(self) # no need of something complex + + def __len__(self): + # `select count (*)` is super slow in sqlite (does a linear scan!!) + # As a result, len() is very slow too once the table size grows beyond trivial. + # We could keep the total count of rows ourselves, by means of triggers, + # but that seems too complicated and would slow down normal operation + # (insert/delete etc). 
+ GET_LEN = 'SELECT COUNT(*) FROM "%s"' % self.tablename + rows = self.conn.select_one(GET_LEN)[0] + return rows if rows is not None else 0 + + def __bool__(self): + # No elements is False, otherwise True + GET_MAX = 'SELECT MAX(ROWID) FROM "%s"' % self.tablename + m = self.conn.select_one(GET_MAX)[0] + # Explicit better than implicit and bla bla + return True if m is not None else False + + def iterkeys(self): + GET_KEYS = 'SELECT key FROM "%s" ORDER BY rowid' % self.tablename + for key in self.conn.select(GET_KEYS): + yield self.decode_key(key[0]) + + def itervalues(self): + GET_VALUES = 'SELECT value FROM "%s" ORDER BY rowid' % self.tablename + for value in self.conn.select(GET_VALUES): + yield self.decode(value[0]) + + def iteritems(self): + GET_ITEMS = 'SELECT key, value FROM "%s" ORDER BY rowid' % self.tablename + for key, value in self.conn.select(GET_ITEMS): + yield self.decode_key(key), self.decode(value) + + def keys(self): + return self.iterkeys() + + def values(self): + return self.itervalues() + + def items(self): + return self.iteritems() + + def __contains__(self, key): + HAS_ITEM = 'SELECT 1 FROM "%s" WHERE key = ?' % self.tablename + return self.conn.select_one(HAS_ITEM, (self.encode_key(key),)) is not None + + def __getitem__(self, key): + GET_ITEM = 'SELECT value FROM "%s" WHERE key = ?' 
% self.tablename + item = self.conn.select_one(GET_ITEM, (self.encode_key(key),)) + if item is None: + raise KeyError(key) + return self.decode(item[0]) + + def __setitem__(self, key, value): + if self.flag == 'r': + raise RuntimeError('Refusing to write to read-only SqliteDict') + + ADD_ITEM = 'REPLACE INTO "%s" (key, value) VALUES (?,?)' % self.tablename + self.conn.execute(ADD_ITEM, (self.encode_key(key), self.encode(value))) + if self.autocommit: + self.commit() + + def __delitem__(self, key): + if self.flag == 'r': + raise RuntimeError('Refusing to delete from read-only SqliteDict') + + if key not in self: + raise KeyError(key) + DEL_ITEM = 'DELETE FROM "%s" WHERE key = ?' % self.tablename + self.conn.execute(DEL_ITEM, (self.encode_key(key),)) + if self.autocommit: + self.commit() + + def update(self, items=(), **kwds): + if self.flag == 'r': + raise RuntimeError('Refusing to update read-only SqliteDict') + + try: + items = items.items() + except AttributeError: + pass + items = [(self.encode_key(k), self.encode(v)) for k, v in items] + + UPDATE_ITEMS = 'REPLACE INTO "%s" (key, value) VALUES (?, ?)' % self.tablename + self.conn.executemany(UPDATE_ITEMS, items) + if kwds: + self.update(kwds) + if self.autocommit: + self.commit() + + def __iter__(self): + return self.iterkeys() + + def clear(self): + if self.flag == 'r': + raise RuntimeError('Refusing to clear read-only SqliteDict') + + # avoid VACUUM, as it gives "OperationalError: database schema has changed" + CLEAR_ALL = 'DELETE FROM "%s";' % self.tablename + self.conn.commit() + self.conn.execute(CLEAR_ALL) + self.conn.commit() + + @staticmethod + def get_tablenames(filename): + """get the names of the tables in an sqlite db as a list""" + if not os.path.isfile(filename): + raise IOError('file %s does not exist' % (filename)) + GET_TABLENAMES = 'SELECT name FROM sqlite_master WHERE type="table"' + with sqlite3.connect(filename) as conn: + cursor = conn.execute(GET_TABLENAMES) + res = cursor.fetchall() + + 
return [name[0] for name in res] + + def commit(self, blocking=True): + """ + Persist all data to disk. + + When `blocking` is False, the commit command is queued, but the data is + not guaranteed persisted (default implication when autocommit=True). + """ + if self.conn is not None: + self.conn.commit(blocking) + sync = commit + + def close(self, do_log=True, force=False): + if do_log: + logger.debug("closing %s" % self) + if hasattr(self, 'conn') and self.conn is not None: + if self.conn.autocommit and not force: + # typically calls to commit are non-blocking when autocommit is + # used. However, we need to block on close() to ensure any + # awaiting exceptions are handled and that all data is + # persisted to disk before returning. + self.conn.commit(blocking=True) + self.conn.close(force=force) + self.conn = None + if self.in_temp: + try: + os.remove(self.filename) + except Exception: + pass + + def terminate(self): + """Delete the underlying database file. Use with care.""" + if self.flag == 'r': + raise RuntimeError('Refusing to terminate read-only SqliteDict') + + self.close() + + if self.filename == ':memory:': + return + + logger.info("deleting %s" % self.filename) + try: + if os.path.isfile(self.filename): + os.remove(self.filename) + except (OSError, IOError): + logger.exception("failed to delete %s" % (self.filename)) + + def __del__(self): + # like close(), but assume globals are gone by now (do not log!) + try: + self.close(do_log=False, force=True) + except Exception: + # prevent error log flood in case of multiple SqliteDicts + # closed after connection lost (exceptions are always ignored + # in __del__ method. + pass + + +class SqliteMultithread(threading.Thread): + """ + Wrap sqlite connection in a way that allows concurrent requests from multiple threads. + + This is done by internally queueing the requests and processing them sequentially + in a separate thread (in the same order they arrived). 
+ + """ + def __init__(self, filename, autocommit, journal_mode, outer_stack=True): + super(SqliteMultithread, self).__init__() + self.filename = filename + self.autocommit = autocommit + self.journal_mode = journal_mode + # use request queue of unlimited size + self.reqs = Queue() + self.daemon = True + self._outer_stack = outer_stack + self.log = logging.getLogger('sqlitedict.SqliteMultithread') + + # + # Parts of this object's state get accessed from different threads, so + # we use synchronization to avoid race conditions. For example, + # .exception gets set inside the new daemon thread that we spawned, but + # gets read from the main thread. This is particularly important + # during initialization: the Thread needs some time to actually start + # working, and until this happens, any calls to e.g. + # check_raise_error() will prematurely return None, meaning all is + # well. If the that connection happens to fail, we'll never know about + # it, and instead wait for a result that never arrives (effectively, + # deadlocking). Locking solves this problem by eliminating the race + # condition. + # + self._lock = threading.Lock() + self._lock.acquire() + self.exception = None + + self.start() + + def _connect(self): + """Connect to the underlying database. + + Raises an exception on failure. Returns the connection and cursor on success. 
+ """ + try: + if self.autocommit: + conn = sqlite3.connect(self.filename, isolation_level=None, check_same_thread=False) + else: + conn = sqlite3.connect(self.filename, check_same_thread=False) + except Exception: + self.log.exception("Failed to initialize connection for filename: %s" % self.filename) + self.exception = sys.exc_info() + raise + + try: + conn.execute('PRAGMA journal_mode = %s' % self.journal_mode) + conn.text_factory = str + cursor = conn.cursor() + conn.commit() + cursor.execute('PRAGMA synchronous=OFF') + except Exception: + self.log.exception("Failed to execute PRAGMA statements.") + self.exception = sys.exc_info() + raise + + return conn, cursor + + def run(self): + # + # Nb. this is what actually runs inside the new daemon thread. + # self._lock is locked at this stage - see the initializer function. + # + try: + conn, cursor = self._connect() + finally: + self._lock.release() + + res_ref = None + while True: + # + # req: an SQL command or one of the --magic-- commands we use internally + # arg: arguments for the command + # res_ref: a weak reference to the queue into which responses must be placed + # outer_stack: the outer stack, for producing more informative traces in case of error + # + req, arg, res_ref, outer_stack = self.reqs.get() + + if req == _REQUEST_CLOSE: + assert res_ref, ('--close-- without return queue', res_ref) + break + elif req == _REQUEST_COMMIT: + conn.commit() + _put(res_ref, _RESPONSE_NO_MORE) + else: + try: + cursor.execute(req, arg) + except Exception: + with self._lock: + self.exception = (e_type, e_value, e_tb) = sys.exc_info() + + inner_stack = traceback.extract_stack() + + # An exception occurred in our thread, but we may not + # immediately able to throw it in our calling thread, if it has + # no return `res` queue: log as level ERROR both the inner and + # outer exception immediately. 
+ # + # Any iteration of res.get() or any next call will detect the + # inner exception and re-raise it in the calling Thread; though + # it may be confusing to see an exception for an unrelated + # statement, an ERROR log statement from the 'sqlitedict.*' + # namespace contains the original outer stack location. + self.log.error('Inner exception:') + for item in traceback.format_list(inner_stack): + self.log.error(item) + self.log.error('') # deliniate traceback & exception w/blank line + for item in traceback.format_exception_only(e_type, e_value): + self.log.error(item) + + self.log.error('') # exception & outer stack w/blank line + + if self._outer_stack: + self.log.error('Outer stack:') + for item in traceback.format_list(outer_stack): + self.log.error(item) + self.log.error('Exception will be re-raised at next call.') + else: + self.log.error( + 'Unable to show the outer stack. Pass ' + 'outer_stack=True when initializing the ' + 'SqliteDict instance to show the outer stack.' + ) + + if res_ref: + for rec in cursor: + if _put(res_ref, rec) == _PUT_REFERENT_DESTROYED: + # + # The queue we are sending responses to got garbage + # collected. Nobody is listening anymore, so we + # stop sending responses. + # + break + + _put(res_ref, _RESPONSE_NO_MORE) + + if self.autocommit: + conn.commit() + + self.log.debug('received: %s, send: --no more--', req) + conn.close() + + _put(res_ref, _RESPONSE_NO_MORE) + + def check_raise_error(self): + """ + Check for and raise exception for any previous sqlite query. + + For the `execute*` family of method calls, such calls are non-blocking and any + exception raised in the thread cannot be handled by the calling Thread (usually + MainThread). This method is called on `close`, and prior to any subsequent + calls to the `execute*` methods to check for and raise an exception in a + previous call to the MainThread. 
+ """ + with self._lock: + if self.exception: + e_type, e_value, e_tb = self.exception + + # clear self.exception, if the caller decides to handle such + # exception, we should not repeatedly re-raise it. + self.exception = None + + self.log.error('An exception occurred from a previous statement, view ' + 'the logging namespace "sqlitedict" for outer stack.') + + # The third argument to raise is the traceback object, and it is + # substituted instead of the current location as the place where + # the exception occurred, this is so that when using debuggers such + # as `pdb', or simply evaluating the naturally raised traceback, we + # retain the original (inner) location of where the exception + # occurred. + reraise(e_type, e_value, e_tb) + + def execute(self, req, arg=None, res=None): + """ + `execute` calls are non-blocking: just queue up the request and return immediately. + + :param req: The request (an SQL command) + :param arg: Arguments to the SQL command + :param res: A queue in which to place responses as they become available + """ + self.check_raise_error() + stack = None + + if self._outer_stack: + # NOTE: This might be a lot of information to pump into an input + # queue, affecting performance. I've also seen earlier versions of + # jython take a severe performance impact for throwing exceptions + # so often. + stack = traceback.extract_stack()[:-1] + + # + # We pass a weak reference to the response queue instead of a regular + # reference, because we want the queues to be garbage-collected + # more aggressively. + # + res_ref = None + if res: + res_ref = weakref.ref(res) + + self.reqs.put((req, arg or tuple(), res_ref, stack)) + + def executemany(self, req, items): + for item in items: + self.execute(req, item) + self.check_raise_error() + + def select(self, req, arg=None): + """ + Unlike sqlite's native select, this select doesn't handle iteration efficiently. 
+ + The result of `select` starts filling up with values as soon as the + request is dequeued, and although you can iterate over the result normally + (`for res in self.select(): ...`), the entire result will be in memory. + """ + res = Queue() # results of the select will appear as items in this queue + self.execute(req, arg, res) + while True: + rec = res.get() + self.check_raise_error() + if rec == _RESPONSE_NO_MORE: + break + yield rec + + def select_one(self, req, arg=None): + """Return only the first row of the SELECT, or None if there are no matching rows.""" + try: + return next(iter(self.select(req, arg))) + except StopIteration: + return None + + def commit(self, blocking=True): + if blocking: + # by default, we await completion of commit() unless + # blocking=False. This ensures any available exceptions for any + # previous statement are thrown before returning, and that the + # data has actually persisted to disk! + self.select_one(_REQUEST_COMMIT) + else: + # otherwise, we fire and forget as usual. + self.execute(_REQUEST_COMMIT) + + def close(self, force=False): + if force: + # If a SqliteDict is being killed or garbage-collected, then select_one() + # could hang forever because run() might already have exited and therefore + # can't process the request. Instead, push the close command to the requests + # queue directly. If run() is still alive, it will exit gracefully. If not, + # then there's nothing we can do anyway. 
+ self.reqs.put((_REQUEST_CLOSE, None, weakref.ref(Queue()), None)) + else: + # we abuse 'select' to "iter" over a "--close--" statement so that we + # can confirm the completion of close before joining the thread and + # returning (by semaphore '--no more--' + self.select_one(_REQUEST_CLOSE) + self.join() + + +# +# This is here for .github/workflows/release.yml +# +if __name__ == '__main__': + print(__version__) diff --git a/env-llmeval/pyvenv.cfg b/env-llmeval/pyvenv.cfg new file mode 100644 index 0000000000000000000000000000000000000000..0537ffc00ba0979f914c62c057a72f4bf1f681c4 --- /dev/null +++ b/env-llmeval/pyvenv.cfg @@ -0,0 +1,3 @@ +home = /usr/bin +include-system-site-packages = false +version = 3.10.12 diff --git a/env-llmeval/share/man/man1/isympy.1 b/env-llmeval/share/man/man1/isympy.1 new file mode 100644 index 0000000000000000000000000000000000000000..0ff966158a28c5ad1a6cd954e454842b25fdd999 --- /dev/null +++ b/env-llmeval/share/man/man1/isympy.1 @@ -0,0 +1,188 @@ +'\" -*- coding: us-ascii -*- +.if \n(.g .ds T< \\FC +.if \n(.g .ds T> \\F[\n[.fam]] +.de URL +\\$2 \(la\\$1\(ra\\$3 +.. +.if \n(.g .mso www.tmac +.TH isympy 1 2007-10-8 "" "" +.SH NAME +isympy \- interactive shell for SymPy +.SH SYNOPSIS +'nh +.fi +.ad l +\fBisympy\fR \kx +.if (\nx>(\n(.l/2)) .nr x (\n(.l/5) +'in \n(.iu+\nxu +[\fB-c\fR | \fB--console\fR] [\fB-p\fR ENCODING | \fB--pretty\fR ENCODING] [\fB-t\fR TYPE | \fB--types\fR TYPE] [\fB-o\fR ORDER | \fB--order\fR ORDER] [\fB-q\fR | \fB--quiet\fR] [\fB-d\fR | \fB--doctest\fR] [\fB-C\fR | \fB--no-cache\fR] [\fB-a\fR | \fB--auto\fR] [\fB-D\fR | \fB--debug\fR] [ +-- | PYTHONOPTIONS] +'in \n(.iu-\nxu +.ad b +'hy +'nh +.fi +.ad l +\fBisympy\fR \kx +.if (\nx>(\n(.l/2)) .nr x (\n(.l/5) +'in \n(.iu+\nxu +[ +{\fB-h\fR | \fB--help\fR} +| +{\fB-v\fR | \fB--version\fR} +] +'in \n(.iu-\nxu +.ad b +'hy +.SH DESCRIPTION +isympy is a Python shell for SymPy. 
It is just a normal python shell +(ipython shell if you have the ipython package installed) that executes +the following commands so that you don't have to: +.PP +.nf +\*(T< +>>> from __future__ import division +>>> from sympy import * +>>> x, y, z = symbols("x,y,z") +>>> k, m, n = symbols("k,m,n", integer=True) + \*(T> +.fi +.PP +So starting isympy is equivalent to starting python (or ipython) and +executing the above commands by hand. It is intended for easy and quick +experimentation with SymPy. For more complicated programs, it is recommended +to write a script and import things explicitly (using the "from sympy +import sin, log, Symbol, ..." idiom). +.SH OPTIONS +.TP +\*(T<\fB\-c \fR\*(T>\fISHELL\fR, \*(T<\fB\-\-console=\fR\*(T>\fISHELL\fR +Use the specified shell (python or ipython) as +console backend instead of the default one (ipython +if present or python otherwise). + +Example: isympy -c python + +\fISHELL\fR could be either +\&'ipython' or 'python' +.TP +\*(T<\fB\-p \fR\*(T>\fIENCODING\fR, \*(T<\fB\-\-pretty=\fR\*(T>\fIENCODING\fR +Setup pretty printing in SymPy. By default, the most pretty, unicode +printing is enabled (if the terminal supports it). You can use less +pretty ASCII printing instead or no pretty printing at all. + +Example: isympy -p no + +\fIENCODING\fR must be one of 'unicode', +\&'ascii' or 'no'. +.TP +\*(T<\fB\-t \fR\*(T>\fITYPE\fR, \*(T<\fB\-\-types=\fR\*(T>\fITYPE\fR +Setup the ground types for the polys. By default, gmpy ground types +are used if gmpy2 or gmpy is installed, otherwise it falls back to python +ground types, which are a little bit slower. You can manually +choose python ground types even if gmpy is installed (e.g., for testing purposes). + +Note that sympy ground types are not supported, and should be used +only for experimental purposes. + +Note that the gmpy1 ground type is primarily intended for testing; it the +use of gmpy even if gmpy2 is available. 
+ +This is the same as setting the environment variable +SYMPY_GROUND_TYPES to the given ground type (e.g., +SYMPY_GROUND_TYPES='gmpy') + +The ground types can be determined interactively from the variable +sympy.polys.domains.GROUND_TYPES inside the isympy shell itself. + +Example: isympy -t python + +\fITYPE\fR must be one of 'gmpy', +\&'gmpy1' or 'python'. +.TP +\*(T<\fB\-o \fR\*(T>\fIORDER\fR, \*(T<\fB\-\-order=\fR\*(T>\fIORDER\fR +Setup the ordering of terms for printing. The default is lex, which +orders terms lexicographically (e.g., x**2 + x + 1). You can choose +other orderings, such as rev-lex, which will use reverse +lexicographic ordering (e.g., 1 + x + x**2). + +Note that for very large expressions, ORDER='none' may speed up +printing considerably, with the tradeoff that the order of the terms +in the printed expression will have no canonical order + +Example: isympy -o rev-lax + +\fIORDER\fR must be one of 'lex', 'rev-lex', 'grlex', +\&'rev-grlex', 'grevlex', 'rev-grevlex', 'old', or 'none'. +.TP +\*(T<\fB\-q\fR\*(T>, \*(T<\fB\-\-quiet\fR\*(T> +Print only Python's and SymPy's versions to stdout at startup, and nothing else. +.TP +\*(T<\fB\-d\fR\*(T>, \*(T<\fB\-\-doctest\fR\*(T> +Use the same format that should be used for doctests. This is +equivalent to '\fIisympy -c python -p no\fR'. +.TP +\*(T<\fB\-C\fR\*(T>, \*(T<\fB\-\-no\-cache\fR\*(T> +Disable the caching mechanism. Disabling the cache may slow certain +operations down considerably. This is useful for testing the cache, +or for benchmarking, as the cache can result in deceptive benchmark timings. + +This is the same as setting the environment variable SYMPY_USE_CACHE +to 'no'. +.TP +\*(T<\fB\-a\fR\*(T>, \*(T<\fB\-\-auto\fR\*(T> +Automatically create missing symbols. Normally, typing a name of a +Symbol that has not been instantiated first would raise NameError, +but with this option enabled, any undefined name will be +automatically created as a Symbol. This only works in IPython 0.11. 
+ +Note that this is intended only for interactive, calculator style +usage. In a script that uses SymPy, Symbols should be instantiated +at the top, so that it's clear what they are. + +This will not override any names that are already defined, which +includes the single character letters represented by the mnemonic +QCOSINE (see the "Gotchas and Pitfalls" document in the +documentation). You can delete existing names by executing "del +name" in the shell itself. You can see if a name is defined by typing +"'name' in globals()". + +The Symbols that are created using this have default assumptions. +If you want to place assumptions on symbols, you should create them +using symbols() or var(). + +Finally, this only works in the top level namespace. So, for +example, if you define a function in isympy with an undefined +Symbol, it will not work. +.TP +\*(T<\fB\-D\fR\*(T>, \*(T<\fB\-\-debug\fR\*(T> +Enable debugging output. This is the same as setting the +environment variable SYMPY_DEBUG to 'True'. The debug status is set +in the variable SYMPY_DEBUG within isympy. +.TP +-- \fIPYTHONOPTIONS\fR +These options will be passed on to \fIipython (1)\fR shell. +Only supported when ipython is being used (standard python shell not supported). + +Two dashes (--) are required to separate \fIPYTHONOPTIONS\fR +from the other isympy options. + +For example, to run iSymPy without startup banner and colors: + +isympy -q -c ipython -- --colors=NoColor +.TP +\*(T<\fB\-h\fR\*(T>, \*(T<\fB\-\-help\fR\*(T> +Print help output and exit. +.TP +\*(T<\fB\-v\fR\*(T>, \*(T<\fB\-\-version\fR\*(T> +Print isympy version information and exit. +.SH FILES +.TP +\*(T<\fI${HOME}/.sympy\-history\fR\*(T> +Saves the history of commands when using the python +shell as backend. +.SH BUGS +The upstreams BTS can be found at \(lahttps://github.com/sympy/sympy/issues\(ra +Please report all bugs that you find in there, this will help improve +the overall quality of SymPy. 
+.SH "SEE ALSO" +\fBipython\fR(1), \fBpython\fR(1) diff --git a/llmeval-env/lib/python3.10/site-packages/attrs-23.2.0.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/attrs-23.2.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/attrs-23.2.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/attrs-23.2.0.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/attrs-23.2.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..c20be76c7476ea998872511afa4449f41d8bb832 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/attrs-23.2.0.dist-info/METADATA @@ -0,0 +1,202 @@ +Metadata-Version: 2.1 +Name: attrs +Version: 23.2.0 +Summary: Classes Without Boilerplate +Project-URL: Documentation, https://www.attrs.org/ +Project-URL: Changelog, https://www.attrs.org/en/stable/changelog.html +Project-URL: GitHub, https://github.com/python-attrs/attrs +Project-URL: Funding, https://github.com/sponsors/hynek +Project-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-attrs?utm_source=pypi-attrs&utm_medium=pypi +Author-email: Hynek Schlawack +License-Expression: MIT +License-File: LICENSE +Keywords: attribute,boilerplate,class +Classifier: Development Status :: 5 - Production/Stable +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Typing :: Typed +Requires-Python: >=3.7 
+Requires-Dist: importlib-metadata; python_version < '3.8' +Provides-Extra: cov +Requires-Dist: attrs[tests]; extra == 'cov' +Requires-Dist: coverage[toml]>=5.3; extra == 'cov' +Provides-Extra: dev +Requires-Dist: attrs[tests]; extra == 'dev' +Requires-Dist: pre-commit; extra == 'dev' +Provides-Extra: docs +Requires-Dist: furo; extra == 'docs' +Requires-Dist: myst-parser; extra == 'docs' +Requires-Dist: sphinx; extra == 'docs' +Requires-Dist: sphinx-notfound-page; extra == 'docs' +Requires-Dist: sphinxcontrib-towncrier; extra == 'docs' +Requires-Dist: towncrier; extra == 'docs' +Requires-Dist: zope-interface; extra == 'docs' +Provides-Extra: tests +Requires-Dist: attrs[tests-no-zope]; extra == 'tests' +Requires-Dist: zope-interface; extra == 'tests' +Provides-Extra: tests-mypy +Requires-Dist: mypy>=1.6; (platform_python_implementation == 'CPython' and python_version >= '3.8') and extra == 'tests-mypy' +Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.8') and extra == 'tests-mypy' +Provides-Extra: tests-no-zope +Requires-Dist: attrs[tests-mypy]; extra == 'tests-no-zope' +Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'tests-no-zope' +Requires-Dist: hypothesis; extra == 'tests-no-zope' +Requires-Dist: pympler; extra == 'tests-no-zope' +Requires-Dist: pytest-xdist[psutil]; extra == 'tests-no-zope' +Requires-Dist: pytest>=4.3.0; extra == 'tests-no-zope' +Description-Content-Type: text/markdown + +

+ + attrs + +

+ + +*attrs* is the Python package that will bring back the **joy** of **writing classes** by relieving you from the drudgery of implementing object protocols (aka [dunder methods](https://www.attrs.org/en/latest/glossary.html#term-dunder-methods)). +[Trusted by NASA](https://docs.github.com/en/account-and-profile/setting-up-and-managing-your-github-profile/customizing-your-profile/personalizing-your-profile#list-of-qualifying-repositories-for-mars-2020-helicopter-contributor-achievement) for Mars missions since 2020! + +Its main goal is to help you to write **concise** and **correct** software without slowing down your code. + + +## Sponsors + +*attrs* would not be possible without our [amazing sponsors](https://github.com/sponsors/hynek). +Especially those generously supporting us at the *The Organization* tier and higher: + +

+ + + +

+ +

+ Please consider joining them to help make attrs’s maintenance more sustainable! +

+ + + +## Example + +*attrs* gives you a class decorator and a way to declaratively define the attributes on that class: + + + +```pycon +>>> from attrs import asdict, define, make_class, Factory + +>>> @define +... class SomeClass: +... a_number: int = 42 +... list_of_numbers: list[int] = Factory(list) +... +... def hard_math(self, another_number): +... return self.a_number + sum(self.list_of_numbers) * another_number + + +>>> sc = SomeClass(1, [1, 2, 3]) +>>> sc +SomeClass(a_number=1, list_of_numbers=[1, 2, 3]) + +>>> sc.hard_math(3) +19 +>>> sc == SomeClass(1, [1, 2, 3]) +True +>>> sc != SomeClass(2, [3, 2, 1]) +True + +>>> asdict(sc) +{'a_number': 1, 'list_of_numbers': [1, 2, 3]} + +>>> SomeClass() +SomeClass(a_number=42, list_of_numbers=[]) + +>>> C = make_class("C", ["a", "b"]) +>>> C("foo", "bar") +C(a='foo', b='bar') +``` + +After *declaring* your attributes, *attrs* gives you: + +- a concise and explicit overview of the class's attributes, +- a nice human-readable `__repr__`, +- equality-checking methods, +- an initializer, +- and much more, + +*without* writing dull boilerplate code again and again and *without* runtime performance penalties. + +**Hate type annotations**!? +No problem! +Types are entirely **optional** with *attrs*. +Simply assign `attrs.field()` to the attributes instead of annotating them with types. + +--- + +This example uses *attrs*'s modern APIs that have been introduced in version 20.1.0, and the *attrs* package import name that has been added in version 21.3.0. +The classic APIs (`@attr.s`, `attr.ib`, plus their serious-business aliases) and the `attr` package import name will remain **indefinitely**. + +Please check out [*On The Core API Names*](https://www.attrs.org/en/latest/names.html) for a more in-depth explanation. + + +## Data Classes + +On the tin, *attrs* might remind you of `dataclasses` (and indeed, `dataclasses` [are a descendant](https://hynek.me/articles/import-attrs/) of *attrs*). 
+In practice it does a lot more and is more flexible. +For instance it allows you to define [special handling of NumPy arrays for equality checks](https://www.attrs.org/en/stable/comparison.html#customization), allows more ways to [plug into the initialization process](https://www.attrs.org/en/stable/init.html#hooking-yourself-into-initialization), and allows for stepping through the generated methods using a debugger. + +For more details, please refer to our [comparison page](https://www.attrs.org/en/stable/why.html#data-classes). + + +## Project Information + +- [**Changelog**](https://www.attrs.org/en/stable/changelog.html) +- [**Documentation**](https://www.attrs.org/) +- [**PyPI**](https://pypi.org/project/attrs/) +- [**Source Code**](https://github.com/python-attrs/attrs) +- [**Contributing**](https://github.com/python-attrs/attrs/blob/main/.github/CONTRIBUTING.md) +- [**Third-party Extensions**](https://github.com/python-attrs/attrs/wiki/Extensions-to-attrs) +- **Get Help**: please use the `python-attrs` tag on [StackOverflow](https://stackoverflow.com/questions/tagged/python-attrs) + + +### *attrs* for Enterprise + +Available as part of the Tidelift Subscription. + +The maintainers of *attrs* and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source packages you use to build your applications. +Save time, reduce risk, and improve code health, while paying the maintainers of the exact packages you use. +[Learn more.](https://tidelift.com/subscription/pkg/pypi-attrs?utm_source=pypi-attrs&utm_medium=referral&utm_campaign=enterprise&utm_term=repo) + +## Release Information + +### Changes + +- The type annotation for `attrs.resolve_types()` is now correct. 
+ [#1141](https://github.com/python-attrs/attrs/issues/1141) +- Type stubs now use `typing.dataclass_transform` to decorate dataclass-like decorators, instead of the non-standard `__dataclass_transform__` special form, which is only supported by Pyright. + [#1158](https://github.com/python-attrs/attrs/issues/1158) +- Fixed serialization of namedtuple fields using `attrs.asdict/astuple()` with `retain_collection_types=True`. + [#1165](https://github.com/python-attrs/attrs/issues/1165) +- `attrs.AttrsInstance` is now a `typing.Protocol` in both type hints and code. + This allows you to subclass it along with another `Protocol`. + [#1172](https://github.com/python-attrs/attrs/issues/1172) +- If *attrs* detects that `__attrs_pre_init__` accepts more than just `self`, it will call it with the same arguments as `__init__` was called. + This allows you to, for example, pass arguments to `super().__init__()`. + [#1187](https://github.com/python-attrs/attrs/issues/1187) +- Slotted classes now transform `functools.cached_property` decorated methods to support equivalent semantics. + [#1200](https://github.com/python-attrs/attrs/issues/1200) +- Added *class_body* argument to `attrs.make_class()` to provide additional attributes for newly created classes. + It is, for example, now possible to attach methods. 
+ [#1203](https://github.com/python-attrs/attrs/issues/1203) + + +--- + +[Full changelog](https://www.attrs.org/en/stable/changelog.html) diff --git a/llmeval-env/lib/python3.10/site-packages/attrs-23.2.0.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/attrs-23.2.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..3a2bd156bda51407cb0e87fee5d1443a4a37071e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/attrs-23.2.0.dist-info/RECORD @@ -0,0 +1,55 @@ +attr/__init__.py,sha256=WlXJN6ICB0Y_HZ0lmuTUgia0kuSdn2p67d4N6cYxNZM,3307 +attr/__init__.pyi,sha256=u08EujYHy_rSyebNn-I9Xv2S_cXmtA9xWGc0cBsyl18,16976 +attr/__pycache__/__init__.cpython-310.pyc,, +attr/__pycache__/_cmp.cpython-310.pyc,, +attr/__pycache__/_compat.cpython-310.pyc,, +attr/__pycache__/_config.cpython-310.pyc,, +attr/__pycache__/_funcs.cpython-310.pyc,, +attr/__pycache__/_make.cpython-310.pyc,, +attr/__pycache__/_next_gen.cpython-310.pyc,, +attr/__pycache__/_version_info.cpython-310.pyc,, +attr/__pycache__/converters.cpython-310.pyc,, +attr/__pycache__/exceptions.cpython-310.pyc,, +attr/__pycache__/filters.cpython-310.pyc,, +attr/__pycache__/setters.cpython-310.pyc,, +attr/__pycache__/validators.cpython-310.pyc,, +attr/_cmp.py,sha256=OQZlWdFX74z18adGEUp40Ojqm0NNu1Flqnv2JE8B2ng,4025 +attr/_cmp.pyi,sha256=sGQmOM0w3_K4-X8cTXR7g0Hqr290E8PTObA9JQxWQqc,399 +attr/_compat.py,sha256=QmRyxii295wcQfaugWqxuIumAPsNQ2-RUF82QZPqMKw,2540 +attr/_config.py,sha256=z81Vt-GeT_2taxs1XZfmHx9TWlSxjPb6eZH1LTGsS54,843 +attr/_funcs.py,sha256=VBTUFKLklsmqxys3qWSTK_Ac9Z4s0mAJWwgW9nA7Llk,17173 +attr/_make.py,sha256=LnVy2e0HygoqaZknhC19z7JmOt7qGkAadf2LZgWVJWI,101923 +attr/_next_gen.py,sha256=as1voi8siAI_o2OQG8YIiZvmn0G7-S3_j_774rnoZ_g,6203 +attr/_typing_compat.pyi,sha256=XDP54TUn-ZKhD62TOQebmzrwFyomhUCoGRpclb6alRA,469 +attr/_version_info.py,sha256=exSqb3b5E-fMSsgZAlEw9XcLpEgobPORCZpcaEglAM4,2121 +attr/_version_info.pyi,sha256=x_M3L3WuB7r_ULXAWjx959udKQ4HLB8l-hsc1FDGNvk,209 
+attr/converters.py,sha256=Kyw5MY0yfnUR_RwN1Vydf0EiE---htDxOgSc_-NYL6A,3622 +attr/converters.pyi,sha256=jKlpHBEt6HVKJvgrMFJRrHq8p61GXg4-Nd5RZWKJX7M,406 +attr/exceptions.py,sha256=HRFq4iybmv7-DcZwyjl6M1euM2YeJVK_hFxuaBGAngI,1977 +attr/exceptions.pyi,sha256=zZq8bCUnKAy9mDtBEw42ZhPhAUIHoTKedDQInJD883M,539 +attr/filters.py,sha256=9pYvXqdg6mtLvKIIb56oALRMoHFnQTcGCO4EXTc1qyM,1470 +attr/filters.pyi,sha256=0mRCjLKxdcvAo0vD-Cr81HfRXXCp9j_cAXjOoAHtPGM,225 +attr/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +attr/setters.py,sha256=pbCZQ-pE6ZxjDqZfWWUhUFefXtpekIU4qS_YDMLPQ50,1400 +attr/setters.pyi,sha256=pyY8TVNBu8TWhOldv_RxHzmGvdgFQH981db70r0fn5I,567 +attr/validators.py,sha256=LGVpbiNg_KGzYrKUD5JPiZkx8TMfynDZGoQoLJNCIMo,19676 +attr/validators.pyi,sha256=167Dl9nt7NUhE9wht1I-buo039qyUT1nEUT_nKjSWr4,2580 +attrs-23.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +attrs-23.2.0.dist-info/METADATA,sha256=WwvG7OHyKjEPpyFUZCCYt1n0E_CcqdRb7bliGEdcm-A,9531 +attrs-23.2.0.dist-info/RECORD,, +attrs-23.2.0.dist-info/WHEEL,sha256=mRYSEL3Ih6g5a_CVMIcwiF__0Ae4_gLYh01YFNwiq1k,87 +attrs-23.2.0.dist-info/licenses/LICENSE,sha256=iCEVyV38KvHutnFPjsbVy8q_Znyv-HKfQkINpj9xTp8,1109 +attrs/__init__.py,sha256=9_5waVbFs7rLqtXZ73tNDrxhezyZ8VZeX4BbvQ3EeJw,1039 +attrs/__init__.pyi,sha256=s_ajQ_U14DOsOz0JbmAKDOi46B3v2PcdO0UAV1MY6Ek,2168 +attrs/__pycache__/__init__.cpython-310.pyc,, +attrs/__pycache__/converters.cpython-310.pyc,, +attrs/__pycache__/exceptions.cpython-310.pyc,, +attrs/__pycache__/filters.cpython-310.pyc,, +attrs/__pycache__/setters.cpython-310.pyc,, +attrs/__pycache__/validators.cpython-310.pyc,, +attrs/converters.py,sha256=8kQljrVwfSTRu8INwEk8SI0eGrzmWftsT7rM0EqyohM,76 +attrs/exceptions.py,sha256=ACCCmg19-vDFaDPY9vFl199SPXCQMN_bENs4DALjzms,76 +attrs/filters.py,sha256=VOUMZug9uEU6dUuA0dF1jInUK0PL3fLgP0VBS5d-CDE,73 +attrs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+attrs/setters.py,sha256=eL1YidYQV3T2h9_SYIZSZR1FAcHGb1TuCTy0E0Lv2SU,73 +attrs/validators.py,sha256=xcy6wD5TtTkdCG1f4XWbocPSO0faBjk5IfVJfP6SUj0,76 diff --git a/llmeval-env/lib/python3.10/site-packages/attrs-23.2.0.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/attrs-23.2.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..2860816abecb3c5ad7da6d018a4334b87e6b6cfc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/attrs-23.2.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.21.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/llmeval-env/lib/python3.10/site-packages/attrs-23.2.0.dist-info/licenses/LICENSE b/llmeval-env/lib/python3.10/site-packages/attrs-23.2.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..2bd6453d255e19b973f19b128596a8b6dd65b2c3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/attrs-23.2.0.dist-info/licenses/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Hynek Schlawack and the attrs contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/llmeval-env/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/LICENSE b/llmeval-env/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..e466b0dfda14f3a7c8ece512937eb99c8b7b6d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/LICENSE @@ -0,0 +1,29 @@ +Copyright (c) 2016 Wenzel Jakob , All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Please also refer to the file .github/CONTRIBUTING.md, which clarifies licensing of +external contributions to this project including patches, pull requests, etc. diff --git a/llmeval-env/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..da03c107ad92ade6a1ee86dc766b9892bbd4f85e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/METADATA @@ -0,0 +1,220 @@ +Metadata-Version: 2.1 +Name: pybind11 +Version: 2.12.0 +Summary: Seamless operability between C++11 and Python +Home-page: https://github.com/pybind/pybind11 +Download-URL: https://github.com/pybind/pybind11/tarball/v2.12.0 +Author: Wenzel Jakob +Author-email: wenzel.jakob@epfl.ch +License: BSD +Project-URL: Documentation, https://pybind11.readthedocs.io/ +Project-URL: Bug Tracker, https://github.com/pybind/pybind11/issues +Project-URL: Discussions, https://github.com/pybind/pybind11/discussions +Project-URL: Changelog, https://pybind11.readthedocs.io/en/latest/changelog.html +Project-URL: Chat, https://gitter.im/pybind/Lobby +Keywords: C++11,Python bindings 
+Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Utilities +Classifier: Programming Language :: C++ +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: C++ +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Requires-Python: >=3.6 +Description-Content-Type: text/x-rst +License-File: LICENSE +Provides-Extra: global +Requires-Dist: pybind11-global ==2.12.0 ; extra == 'global' + +.. figure:: https://github.com/pybind/pybind11/raw/master/docs/pybind11-logo.png + :alt: pybind11 logo + +**pybind11 — Seamless operability between C++11 and Python** + +|Latest Documentation Status| |Stable Documentation Status| |Gitter chat| |GitHub Discussions| |CI| |Build status| + +|Repology| |PyPI package| |Conda-forge| |Python Versions| + +`Setuptools example `_ +• `Scikit-build example `_ +• `CMake example `_ + +.. start + + +**pybind11** is a lightweight header-only library that exposes C++ types +in Python and vice versa, mainly to create Python bindings of existing +C++ code. Its goals and syntax are similar to the excellent +`Boost.Python `_ +library by David Abrahams: to minimize boilerplate code in traditional +extension modules by inferring type information using compile-time +introspection. 
+ +The main issue with Boost.Python—and the reason for creating such a +similar project—is Boost. Boost is an enormously large and complex suite +of utility libraries that works with almost every C++ compiler in +existence. This compatibility has its cost: arcane template tricks and +workarounds are necessary to support the oldest and buggiest of compiler +specimens. Now that C++11-compatible compilers are widely available, +this heavy machinery has become an excessively large and unnecessary +dependency. + +Think of this library as a tiny self-contained version of Boost.Python +with everything stripped away that isn't relevant for binding +generation. Without comments, the core header files only require ~4K +lines of code and depend on Python (3.6+, or PyPy) and the C++ +standard library. This compact implementation was possible thanks to +some C++11 language features (specifically: tuples, lambda functions and +variadic templates). Since its creation, this library has grown beyond +Boost.Python in many ways, leading to dramatically simpler binding code in many +common situations. + +Tutorial and reference documentation is provided at +`pybind11.readthedocs.io `_. +A PDF version of the manual is available +`here `_. +And the source code is always available at +`github.com/pybind/pybind11 `_. 
+ + +Core features +------------- + + +pybind11 can map the following core C++ features to Python: + +- Functions accepting and returning custom data structures per value, + reference, or pointer +- Instance methods and static methods +- Overloaded functions +- Instance attributes and static attributes +- Arbitrary exception types +- Enumerations +- Callbacks +- Iterators and ranges +- Custom operators +- Single and multiple inheritance +- STL data structures +- Smart pointers with reference counting like ``std::shared_ptr`` +- Internal references with correct reference counting +- C++ classes with virtual (and pure virtual) methods can be extended + in Python +- Integrated NumPy support (NumPy 2 requires pybind11 2.12+) + +Goodies +------- + +In addition to the core functionality, pybind11 provides some extra +goodies: + +- Python 3.6+, and PyPy3 7.3 are supported with an implementation-agnostic + interface (pybind11 2.9 was the last version to support Python 2 and 3.5). + +- It is possible to bind C++11 lambda functions with captured + variables. The lambda capture data is stored inside the resulting + Python function object. + +- pybind11 uses C++11 move constructors and move assignment operators + whenever possible to efficiently transfer custom data types. + +- It's easy to expose the internal storage of custom data types through + Pythons' buffer protocols. This is handy e.g. for fast conversion + between C++ matrix classes like Eigen and NumPy without expensive + copy operations. + +- pybind11 can automatically vectorize functions so that they are + transparently applied to all entries of one or more NumPy array + arguments. + +- Python's slice-based access and assignment operations can be + supported with just a few lines of code. + +- Everything is contained in just a few header files; there is no need + to link against any additional libraries. 
+ +- Binaries are generally smaller by a factor of at least 2 compared to + equivalent bindings generated by Boost.Python. A recent pybind11 + conversion of PyRosetta, an enormous Boost.Python binding project, + `reported `_ + a binary size reduction of **5.4x** and compile time reduction by + **5.8x**. + +- Function signatures are precomputed at compile time (using + ``constexpr``), leading to smaller binaries. + +- With little extra effort, C++ types can be pickled and unpickled + similar to regular Python objects. + +Supported compilers +------------------- + +1. Clang/LLVM 3.3 or newer (for Apple Xcode's clang, this is 5.0.0 or + newer) +2. GCC 4.8 or newer +3. Microsoft Visual Studio 2017 or newer +4. Intel classic C++ compiler 18 or newer (ICC 20.2 tested in CI) +5. Cygwin/GCC (previously tested on 2.5.1) +6. NVCC (CUDA 11.0 tested in CI) +7. NVIDIA PGI (20.9 tested in CI) + +About +----- + +This project was created by `Wenzel +Jakob `_. Significant features and/or +improvements to the code were contributed by Jonas Adler, Lori A. Burns, +Sylvain Corlay, Eric Cousineau, Aaron Gokaslan, Ralf Grosse-Kunstleve, Trent Houliston, Axel +Huebl, @hulucc, Yannick Jadoul, Sergey Lyskov, Johan Mabille, Tomasz Miąsko, +Dean Moldovan, Ben Pritchard, Jason Rhinelander, Boris Schäling, Pim +Schellart, Henry Schreiner, Ivan Smirnov, Boris Staletic, and Patrick Stewart. + +We thank Google for a generous financial contribution to the continuous +integration infrastructure used by this project. + + +Contributing +~~~~~~~~~~~~ + +See the `contributing +guide `_ +for information on building and contributing to pybind11. + +License +~~~~~~~ + +pybind11 is provided under a BSD-style license that can be found in the +`LICENSE `_ +file. By using, distributing, or contributing to this project, you agree +to the terms and conditions of this license. + +.. 
|Latest Documentation Status| image:: https://readthedocs.org/projects/pybind11/badge?version=latest + :target: http://pybind11.readthedocs.org/en/latest +.. |Stable Documentation Status| image:: https://img.shields.io/badge/docs-stable-blue.svg + :target: http://pybind11.readthedocs.org/en/stable +.. |Gitter chat| image:: https://img.shields.io/gitter/room/gitterHQ/gitter.svg + :target: https://gitter.im/pybind/Lobby +.. |CI| image:: https://github.com/pybind/pybind11/workflows/CI/badge.svg + :target: https://github.com/pybind/pybind11/actions +.. |Build status| image:: https://ci.appveyor.com/api/projects/status/riaj54pn4h08xy40?svg=true + :target: https://ci.appveyor.com/project/wjakob/pybind11 +.. |PyPI package| image:: https://img.shields.io/pypi/v/pybind11.svg + :target: https://pypi.org/project/pybind11/ +.. |Conda-forge| image:: https://img.shields.io/conda/vn/conda-forge/pybind11.svg + :target: https://github.com/conda-forge/pybind11-feedstock +.. |Repology| image:: https://repology.org/badge/latest-versions/python:pybind11.svg + :target: https://repology.org/project/python:pybind11/versions +.. |Python Versions| image:: https://img.shields.io/pypi/pyversions/pybind11.svg + :target: https://pypi.org/project/pybind11/ +.. 
|GitHub Discussions| image:: https://img.shields.io/static/v1?label=Discussions&message=Ask&color=blue&logo=github + :target: https://github.com/pybind/pybind11/discussions diff --git a/llmeval-env/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..d5eb6f437e3e2688a84e2409b1637206bb30b854 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/RECORD @@ -0,0 +1,60 @@ +../../../bin/pybind11-config,sha256=9In58X_2imzkxDEvtj63l0Y-g0CI8F3adUpjagj-yJ0,250 +pybind11-2.12.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pybind11-2.12.0.dist-info/LICENSE,sha256=g5ZbhDuY9nDTqFvQQe1LNyyOxQ17SlmVqDrGl7pnXcs,1684 +pybind11-2.12.0.dist-info/METADATA,sha256=MeLhLdk3U_NbRXD34xF1zQZbkETbU5TPjOB7QROSUh0,9514 +pybind11-2.12.0.dist-info/RECORD,, +pybind11-2.12.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92 +pybind11-2.12.0.dist-info/entry_points.txt,sha256=Q_kAwEJBDz8wHD0V50hY3AvchDk3Pfyeox2YHrAcWZ0,105 +pybind11-2.12.0.dist-info/top_level.txt,sha256=d1mqwSpUlmlZhXDQ9Y57eNlXc3dVDM1toKmfC1kJbvU,9 +pybind11/__init__.py,sha256=4-WhH9Ac6P8D_FqnflpOch8XlaZrkXbe95FspbMvwu0,429 +pybind11/__main__.py,sha256=ATLlhFlhBxDXxxXEfnf2F1RcRhuWN1ziMwbmrGuhif0,1544 +pybind11/__pycache__/__init__.cpython-310.pyc,, +pybind11/__pycache__/__main__.cpython-310.pyc,, +pybind11/__pycache__/_version.cpython-310.pyc,, +pybind11/__pycache__/commands.cpython-310.pyc,, +pybind11/__pycache__/setup_helpers.cpython-310.pyc,, +pybind11/_version.py,sha256=v3xaYVbYgxyXxYRPjYCaanioPokR2maNxXiY2U439Nk,228 +pybind11/commands.py,sha256=iJBFWhXHqlC_JMAgMjMIn6H_hizvorS572upGU1roGA,1207 +pybind11/include/pybind11/attr.h,sha256=QPjH7BfhL8QFwHHkrDak8gNOLMlb1itAO5fobjdoLp8,24334 +pybind11/include/pybind11/buffer_info.h,sha256=m_VE_hfWPKl-KgUZy9aVQdPg1xtoaDaBgkurIX7aGig,7750 
+pybind11/include/pybind11/cast.h,sha256=j5UvHFBOE3o-8kB2UcBNumV-dv9pLWn2Gf1uh-fz7pY,71139 +pybind11/include/pybind11/chrono.h,sha256=A23naeloqn-1NKVAABOsJtHU9Vz8lfvrAICuLk-7qBM,8458 +pybind11/include/pybind11/common.h,sha256=ATg9Bt1pwF8qnNuI086fprM4CUTdrZdk_g2HXE1Sf6A,120 +pybind11/include/pybind11/complex.h,sha256=AaDZ-rEmK4tFaue-K9P5y3TxxnaQF6JwZ_6LAzkdLQI,2096 +pybind11/include/pybind11/detail/class.h,sha256=J3yQxEpB9cg68riM3WnR5W9mzxraCJxmgQyHvONPPSM,28563 +pybind11/include/pybind11/detail/common.h,sha256=ww8qY6xFAjDhwTN8R3z-f4KI9itmVRRwG4H5vxYEfk0,53771 +pybind11/include/pybind11/detail/descr.h,sha256=k1nvytx1zhMh8ERL2xS8Unbxcio5fa7eZIqnTsZ0orE,5962 +pybind11/include/pybind11/detail/init.h,sha256=xJ_nyNwZh1j_a0d8K9fCloZ0-MIfh4X_vHja4CFwVF0,17858 +pybind11/include/pybind11/detail/internals.h,sha256=j0CmJRrMvSLOHFxn5yeq5lqTqBcjSoA0kT0v_VvgmgM,29033 +pybind11/include/pybind11/detail/type_caster_base.h,sha256=9AmJNWNFnbAmlty11TZEj4dcIDBItN_5EbHz3beDenE,49892 +pybind11/include/pybind11/detail/typeid.h,sha256=jw5pr9m72vkDsloT8vxl9wj17VJGcEdXDyziBlt89Js,1625 +pybind11/include/pybind11/eigen.h,sha256=-HmSA1kgwCQ-GHUt7PHtTEc-vxqw9xARpF8PHWJip28,316 +pybind11/include/pybind11/eigen/common.h,sha256=dIeqmK7IzW5K4k2larPnA1A863rDp38U9YbNIwiIyYk,378 +pybind11/include/pybind11/eigen/matrix.h,sha256=CS8NpkZI8Y8ty0NFQC7GZcUlM5o8_1Abv1GbGltsbkA,32135 +pybind11/include/pybind11/eigen/tensor.h,sha256=U7wM4vClaDAwWCKAqwmsCPiA2B3rAszIT3tV_yQusUw,18490 +pybind11/include/pybind11/embed.h,sha256=xD-oEg56PadTig9a8FOcMgbsL64jaie7hwG3y6DWPEI,13459 +pybind11/include/pybind11/eval.h,sha256=7re-O2Eor1yD0Q_KgFkHIjKD17ejzII687Yszl9_KfE,4731 +pybind11/include/pybind11/functional.h,sha256=XY1Rj5_x2nb9AT0OzB9skt6OMOn6klNSkT0uBrRIkLo,5051 +pybind11/include/pybind11/gil.h,sha256=IAR_w0RupvFS5bLfw66ZV91OE9WC1p1ztOFSaxHGvZo,8517 +pybind11/include/pybind11/gil_safe_call_once.h,sha256=tPoJICumDjCcfFsFkltDGLj7c42NbgdhSt0ERkrSGKQ,3876 
+pybind11/include/pybind11/iostream.h,sha256=K5rPXoCYN325r1PptcJCIhPhgtRtTJQjMr7bvUIOwxk,8862 +pybind11/include/pybind11/numpy.h,sha256=iaVp3boyb4GkVgY2vgBXbFaLwoHPb6rmSlOM44-eFU4,84243 +pybind11/include/pybind11/operators.h,sha256=224RoAXcv1la4NNY9rQ3aD_AeC8S9ZKx3HVK1O8B4MU,9103 +pybind11/include/pybind11/options.h,sha256=qXvmnj--9fZSp56NYefnB3W5V17ppHlY1Srgo3DNBpw,2734 +pybind11/include/pybind11/pybind11.h,sha256=zwcJLUvVmiZPpzvkt0Lu9IysI5Xs1ptCw9Y7C689jJU,129569 +pybind11/include/pybind11/pytypes.h,sha256=ehwy0s9uSGkByshl2l90nd25D0Mop3RNY09JTRkHUME,98953 +pybind11/include/pybind11/stl.h,sha256=aMi1OCCw2Zb-IRLSlAtQEJJHtWsRJiLT9dKDMHST1Ic,15532 +pybind11/include/pybind11/stl/filesystem.h,sha256=refLexmdcqOM6Qjo9QMB6heA5bQ7GZrP6DCvVBv0R1M,4185 +pybind11/include/pybind11/stl_bind.h,sha256=TA3A3guojho4GWsaP8SQfqbphF_HJ62-Sj2M8-CnxVA,28472 +pybind11/include/pybind11/type_caster_pyobject_ptr.h,sha256=H7pKBYTvUlibiJQEcKmeAkygSQwoCkuIyukNSDmVq-U,1929 +pybind11/include/pybind11/typing.h,sha256=rnjXxUTOp6EKJ4bwGCNV5Jortun-gBezC5s4SH-o8Yw,3600 +pybind11/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pybind11/setup_helpers.py,sha256=DZfrJeCTrHZDUpYVui7BDntZYtIp65UUQiVg8__Xd3Q,17492 +pybind11/share/cmake/pybind11/FindPythonLibsNew.cmake,sha256=5yX3-6c9CpjqBM4NfgwqK91XgOI-0hOb5sPOMKMuigA,12183 +pybind11/share/cmake/pybind11/pybind11Common.cmake,sha256=BlxskJajbjfLdkGSX4jO05tFCH4m96W32TjHiiiwt4Q,15032 +pybind11/share/cmake/pybind11/pybind11Config.cmake,sha256=4leRiHT_QZBFTnZsZKCkh70sjOd-odOWzomPnep-5HE,7952 +pybind11/share/cmake/pybind11/pybind11ConfigVersion.cmake,sha256=saYag2OKwhcTsAhc0dpS3o88VFvY7GDyx8gGf8N0-ss,1403 +pybind11/share/cmake/pybind11/pybind11NewTools.cmake,sha256=j9cUliW9A46IzvMzLv9WtMgz5EblY4Hqda_1SY99INY,10970 +pybind11/share/cmake/pybind11/pybind11Targets.cmake,sha256=ymsjrVrcUP03W9M-HBX_mpIQSb8ZE4aDCGKDIoTXvd8,4271 +pybind11/share/cmake/pybind11/pybind11Tools.cmake,sha256=34Dh2KSsg2FqkTzfDjjxNKOBr7kMy8_Gg3fkP_DCO_E,8569 
+pybind11/share/pkgconfig/pybind11.pc,sha256=R0BoqMlsJGZAxWTVSQZsCvJC3UcjW4cZzpvhKmSprDA,171 diff --git a/llmeval-env/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..bab98d675883cc7567a79df485cd7b4f015e376f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.43.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/llmeval-env/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/entry_points.txt b/llmeval-env/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..8de5a647622cd4cbe433550be7a6b91be72e304a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/entry_points.txt @@ -0,0 +1,5 @@ +[console_scripts] +pybind11-config = pybind11.__main__:main + +[pipx.run] +pybind11 = pybind11.__main__:main diff --git a/llmeval-env/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..e47c59fd7ced4c11e813c3ef82d919443a5f1d33 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pybind11-2.12.0.dist-info/top_level.txt @@ -0,0 +1 @@ +pybind11 diff --git a/llmeval-env/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/LICENSE 
b/llmeval-env/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..353924be0e59b9ad7e6c22848c2189398481821d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/LICENSE @@ -0,0 +1,19 @@ +Copyright Jason R. Coombs + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. 
diff --git a/llmeval-env/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..8e323f4e92af7e3765878dc0d145359389cb41d1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/METADATA @@ -0,0 +1,124 @@ +Metadata-Version: 2.1 +Name: setuptools +Version: 59.6.0 +Summary: Easily download, build, install, upgrade, and uninstall Python packages +Home-page: https://github.com/pypa/setuptools +Author: Python Packaging Authority +Author-email: distutils-sig@python.org +License: UNKNOWN +Project-URL: Documentation, https://setuptools.pypa.io/ +Keywords: CPAN PyPI distutils eggs package management +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: System :: Archiving :: Packaging +Classifier: Topic :: System :: Systems Administration +Classifier: Topic :: Utilities +Requires-Python: >=3.6 +License-File: LICENSE +Provides-Extra: certs +Provides-Extra: docs +Requires-Dist: furo ; extra == 'docs' +Requires-Dist: jaraco.packaging (>=8.2) ; extra == 'docs' +Requires-Dist: jaraco.tidelift (>=1.4) ; extra == 'docs' +Requires-Dist: pygments-github-lexers (==0.0.5) ; extra == 'docs' +Requires-Dist: rst.linker (>=1.9) ; extra == 'docs' +Requires-Dist: sphinx ; extra == 'docs' +Requires-Dist: sphinx-inline-tabs ; extra == 'docs' +Requires-Dist: sphinxcontrib-towncrier ; extra == 'docs' +Provides-Extra: ssl +Provides-Extra: testing +Requires-Dist: flake8-2020 ; extra == 'testing' +Requires-Dist: jaraco.envs (>=2.2) ; extra == 'testing' +Requires-Dist: jaraco.path (>=3.2.0) ; 
extra == 'testing' +Requires-Dist: mock ; extra == 'testing' +Requires-Dist: paver ; extra == 'testing' +Requires-Dist: pip (>=19.1) ; extra == 'testing' +Requires-Dist: pytest-checkdocs (>=2.4) ; extra == 'testing' +Requires-Dist: pytest-cov ; extra == 'testing' +Requires-Dist: pytest-enabler (>=1.0.1) ; extra == 'testing' +Requires-Dist: pytest-flake8 ; extra == 'testing' +Requires-Dist: pytest-virtualenv (>=1.2.7) ; extra == 'testing' +Requires-Dist: pytest-xdist ; extra == 'testing' +Requires-Dist: pytest (>=6) ; extra == 'testing' +Requires-Dist: sphinx ; extra == 'testing' +Requires-Dist: virtualenv (>=13.0.0) ; extra == 'testing' +Requires-Dist: wheel ; extra == 'testing' +Requires-Dist: pytest-black (>=0.3.7) ; (platform_python_implementation != "PyPy") and extra == 'testing' +Requires-Dist: pytest-mypy ; (platform_python_implementation != "PyPy") and extra == 'testing' + +.. image:: https://raw.githubusercontent.com/pypa/setuptools/main/docs/images/banner-640x320.svg + :align: center + +| + +.. image:: https://img.shields.io/pypi/v/setuptools.svg + :target: `PyPI link`_ + +.. image:: https://img.shields.io/pypi/pyversions/setuptools.svg + :target: `PyPI link`_ + +.. _PyPI link: https://pypi.org/project/setuptools + +.. image:: https://github.com/pypa/setuptools/workflows/tests/badge.svg + :target: https://github.com/pypa/setuptools/actions?query=workflow%3A%22tests%22 + :alt: tests + +.. image:: https://img.shields.io/badge/code%20style-black-000000.svg + :target: https://github.com/psf/black + :alt: Code style: Black + +.. image:: https://img.shields.io/readthedocs/setuptools/latest.svg + :target: https://setuptools.pypa.io + +.. image:: https://img.shields.io/badge/skeleton-2021-informational + :target: https://blog.jaraco.com/skeleton + +.. image:: https://img.shields.io/codecov/c/github/pypa/setuptools/master.svg?logo=codecov&logoColor=white + :target: https://codecov.io/gh/pypa/setuptools + +.. 
image:: https://tidelift.com/badges/github/pypa/setuptools?style=flat + :target: https://tidelift.com/subscription/pkg/pypi-setuptools?utm_source=pypi-setuptools&utm_medium=readme + +See the `Installation Instructions +`_ in the Python Packaging +User's Guide for instructions on installing, upgrading, and uninstalling +Setuptools. + +Questions and comments should be directed to the `distutils-sig +mailing list `_. +Bug reports and especially tested patches may be +submitted directly to the `bug tracker +`_. + + +Code of Conduct +=============== + +Everyone interacting in the setuptools project's codebases, issue trackers, +chat rooms, and mailing lists is expected to follow the +`PSF Code of Conduct `_. + + +For Enterprise +============== + +Available as part of the Tidelift Subscription. + +Setuptools and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use. + +`Learn more `_. + + +Security Contact +================ + +To report a security vulnerability, please use the +`Tidelift security contact `_. +Tidelift will coordinate the fix and disclosure. 
+ + diff --git a/llmeval-env/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..def46084e349fbb8f517ba0ecbc39747f3f761cc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/RECORD @@ -0,0 +1,298 @@ +_distutils_hack/__init__.py,sha256=TCUx2qEhWNyruLzj4DOGZAQH39hm2fJ_wDd90olNOmo,3759 +_distutils_hack/__pycache__/__init__.cpython-310.pyc,, +_distutils_hack/__pycache__/override.cpython-310.pyc,, +_distutils_hack/override.py,sha256=Eu_s-NF6VIZ4Cqd0tbbA5wtWky2IZPNd8et6GLt1mzo,44 +distutils-precedence.pth,sha256=fqf_7z_ioRfuEsaO1lU2F_DX_S8FkCV8JcSElZo7c3M,152 +pkg_resources/__init__.py,sha256=uAnPq8FsTXHAEHFWK7UU9AhdNjE4o5Skfk8CyfbztO8,108573 +pkg_resources/__pycache__/__init__.cpython-310.pyc,, +pkg_resources/_vendor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pkg_resources/_vendor/__pycache__/__init__.cpython-310.pyc,, +pkg_resources/_vendor/__pycache__/appdirs.cpython-310.pyc,, +pkg_resources/_vendor/__pycache__/pyparsing.cpython-310.pyc,, +pkg_resources/_vendor/appdirs.py,sha256=MievUEuv3l_mQISH5SF0shDk_BNhHHzYiAPrT3ITN4I,24701 +pkg_resources/_vendor/packaging/__about__.py,sha256=IIRHpOsJlJSgkjq1UoeBoMTqhvNp3gN9FyMb5Kf8El4,661 +pkg_resources/_vendor/packaging/__init__.py,sha256=b9Kk5MF7KxhhLgcDmiUWukN-LatWFxPdNug0joPhHSk,497 +pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-310.pyc,, +pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-310.pyc,, +pkg_resources/_vendor/packaging/__pycache__/_manylinux.cpython-310.pyc,, +pkg_resources/_vendor/packaging/__pycache__/_musllinux.cpython-310.pyc,, +pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-310.pyc,, +pkg_resources/_vendor/packaging/__pycache__/markers.cpython-310.pyc,, +pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-310.pyc,, 
+pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-310.pyc,, +pkg_resources/_vendor/packaging/__pycache__/tags.cpython-310.pyc,, +pkg_resources/_vendor/packaging/__pycache__/utils.cpython-310.pyc,, +pkg_resources/_vendor/packaging/__pycache__/version.cpython-310.pyc,, +pkg_resources/_vendor/packaging/_manylinux.py,sha256=XcbiXB-qcjv3bcohp6N98TMpOP4_j3m-iOA8ptK2GWY,11488 +pkg_resources/_vendor/packaging/_musllinux.py,sha256=z5yeG1ygOPx4uUyLdqj-p8Dk5UBb5H_b0NIjW9yo8oA,4378 +pkg_resources/_vendor/packaging/_structures.py,sha256=TMiAgFbdUOPmIfDIfiHc3KFhSJ8kMjof2QS5I-2NyQ8,1629 +pkg_resources/_vendor/packaging/markers.py,sha256=gFSKoBTb0sKDw1v_apJy15lPr0v2mEvuEkfooTtcWx4,8496 +pkg_resources/_vendor/packaging/requirements.py,sha256=uJ4cjwm3_nrfHJLCcGU9mT5aw8SXfw8v1aBUD7OFuVs,4706 +pkg_resources/_vendor/packaging/specifiers.py,sha256=MZ-fYcNL3u7pNrt-6g2EQO7AbRXkjc-SPEYwXMQbLmc,30964 +pkg_resources/_vendor/packaging/tags.py,sha256=vGybAUQYlPKMcukzX_2e65fmafnFFuMbD25naYTEwtc,15710 +pkg_resources/_vendor/packaging/utils.py,sha256=dJjeat3BS-TYn1RrUFVwufUMasbtzLfYRoy_HXENeFQ,4200 +pkg_resources/_vendor/packaging/version.py,sha256=_fLRNrFrxYcHVfyo8vk9j8s6JM8N_xsSxVFr6RJyco8,14665 +pkg_resources/_vendor/pyparsing.py,sha256=tmrp-lu-qO1i75ZzIN5A12nKRRD1Cm4Vpk-5LR9rims,232055 +pkg_resources/extern/__init__.py,sha256=3PixaT9Tzzd4NoyV6CVhGd7S_9Z-U5yvMWAftZKvC6k,2362 +pkg_resources/extern/__pycache__/__init__.cpython-310.pyc,, +pkg_resources/tests/data/my-test-package-source/__pycache__/setup.cpython-310.pyc,, +pkg_resources/tests/data/my-test-package-source/setup.py,sha256=Mrezl3nqxkYkjCYpIxmjhhg4AR8hgi4QZdEYmk-I7R8,104 +setuptools-59.6.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +setuptools-59.6.0.dist-info/LICENSE,sha256=2z8CRrH5J48VhFuZ_sR4uLUG63ZIeZNyL4xuJUKF-vg,1050 +setuptools-59.6.0.dist-info/METADATA,sha256=wis8J-_8PwCf5xGTjZ520vMjGCF94516nC1ml1ebyX4,4963 +setuptools-59.6.0.dist-info/RECORD,, 
+setuptools-59.6.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +setuptools-59.6.0.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 +setuptools-59.6.0.dist-info/entry_points.txt,sha256=wpnhLrbtyk4hZ1qCCw48cCSxoQPzULMhIuaFqsB7GxQ,2636 +setuptools-59.6.0.dist-info/top_level.txt,sha256=1Euo4fJMWPMc6iG8BrvoHx4c65FnzA7Mv_p3en0BDds,48 +setuptools/__init__.py,sha256=l7ULo8jGk-4-8jbacmJ58cYpSRX4swS1ccbJaJVAGdM,7448 +setuptools/__pycache__/__init__.cpython-310.pyc,, +setuptools/__pycache__/_deprecation_warning.cpython-310.pyc,, +setuptools/__pycache__/_imp.cpython-310.pyc,, +setuptools/__pycache__/archive_util.cpython-310.pyc,, +setuptools/__pycache__/build_meta.cpython-310.pyc,, +setuptools/__pycache__/config.cpython-310.pyc,, +setuptools/__pycache__/dep_util.cpython-310.pyc,, +setuptools/__pycache__/depends.cpython-310.pyc,, +setuptools/__pycache__/dist.cpython-310.pyc,, +setuptools/__pycache__/errors.cpython-310.pyc,, +setuptools/__pycache__/extension.cpython-310.pyc,, +setuptools/__pycache__/glob.cpython-310.pyc,, +setuptools/__pycache__/installer.cpython-310.pyc,, +setuptools/__pycache__/launch.cpython-310.pyc,, +setuptools/__pycache__/monkey.cpython-310.pyc,, +setuptools/__pycache__/msvc.cpython-310.pyc,, +setuptools/__pycache__/namespaces.cpython-310.pyc,, +setuptools/__pycache__/package_index.cpython-310.pyc,, +setuptools/__pycache__/py34compat.cpython-310.pyc,, +setuptools/__pycache__/sandbox.cpython-310.pyc,, +setuptools/__pycache__/unicode_utils.cpython-310.pyc,, +setuptools/__pycache__/version.cpython-310.pyc,, +setuptools/__pycache__/wheel.cpython-310.pyc,, +setuptools/__pycache__/windows_support.cpython-310.pyc,, +setuptools/_deprecation_warning.py,sha256=jU9-dtfv6cKmtQJOXN8nP1mm7gONw5kKEtiPtbwnZyI,218 +setuptools/_distutils/__init__.py,sha256=3YtkfadGoU57VMEQFk2TNyMZVud1kDkakWQLhWg2Fm8,536 +setuptools/_distutils/__pycache__/__init__.cpython-310.pyc,, 
+setuptools/_distutils/__pycache__/_msvccompiler.cpython-310.pyc,, +setuptools/_distutils/__pycache__/archive_util.cpython-310.pyc,, +setuptools/_distutils/__pycache__/bcppcompiler.cpython-310.pyc,, +setuptools/_distutils/__pycache__/ccompiler.cpython-310.pyc,, +setuptools/_distutils/__pycache__/cmd.cpython-310.pyc,, +setuptools/_distutils/__pycache__/config.cpython-310.pyc,, +setuptools/_distutils/__pycache__/core.cpython-310.pyc,, +setuptools/_distutils/__pycache__/cygwinccompiler.cpython-310.pyc,, +setuptools/_distutils/__pycache__/debug.cpython-310.pyc,, +setuptools/_distutils/__pycache__/dep_util.cpython-310.pyc,, +setuptools/_distutils/__pycache__/dir_util.cpython-310.pyc,, +setuptools/_distutils/__pycache__/dist.cpython-310.pyc,, +setuptools/_distutils/__pycache__/errors.cpython-310.pyc,, +setuptools/_distutils/__pycache__/extension.cpython-310.pyc,, +setuptools/_distutils/__pycache__/fancy_getopt.cpython-310.pyc,, +setuptools/_distutils/__pycache__/file_util.cpython-310.pyc,, +setuptools/_distutils/__pycache__/filelist.cpython-310.pyc,, +setuptools/_distutils/__pycache__/log.cpython-310.pyc,, +setuptools/_distutils/__pycache__/msvc9compiler.cpython-310.pyc,, +setuptools/_distutils/__pycache__/msvccompiler.cpython-310.pyc,, +setuptools/_distutils/__pycache__/py35compat.cpython-310.pyc,, +setuptools/_distutils/__pycache__/py38compat.cpython-310.pyc,, +setuptools/_distutils/__pycache__/spawn.cpython-310.pyc,, +setuptools/_distutils/__pycache__/sysconfig.cpython-310.pyc,, +setuptools/_distutils/__pycache__/text_file.cpython-310.pyc,, +setuptools/_distutils/__pycache__/unixccompiler.cpython-310.pyc,, +setuptools/_distutils/__pycache__/util.cpython-310.pyc,, +setuptools/_distutils/__pycache__/version.cpython-310.pyc,, +setuptools/_distutils/__pycache__/versionpredicate.cpython-310.pyc,, +setuptools/_distutils/_msvccompiler.py,sha256=jR0JM5A1JMnZ6xMDicQzhXWgXTVXs1lWAeUexC1z198,20813 
+setuptools/_distutils/archive_util.py,sha256=qW-uiGwYexTvK5e-iSel_31Dshx-CqTanNPK6snwf98,8572 +setuptools/_distutils/bcppcompiler.py,sha256=OJDVpCUmX6H8v_7lV1zifV1fcx92Cr2dhiUh6989UJI,14894 +setuptools/_distutils/ccompiler.py,sha256=YbernlpGZZqKnfzZSfJ814fINca8cicZiUlBjyUPyaM,47644 +setuptools/_distutils/cmd.py,sha256=eco6LAGUtobLuPafuhmgKgkwRRL_WY8KJ4YeDCHpcls,18079 +setuptools/_distutils/command/__init__.py,sha256=2TA-rlNDlzeI-csbWHXFjGD8uOYqALMfyWOhT49nC6g,799 +setuptools/_distutils/command/__pycache__/__init__.cpython-310.pyc,, +setuptools/_distutils/command/__pycache__/bdist.cpython-310.pyc,, +setuptools/_distutils/command/__pycache__/bdist_dumb.cpython-310.pyc,, +setuptools/_distutils/command/__pycache__/bdist_msi.cpython-310.pyc,, +setuptools/_distutils/command/__pycache__/bdist_rpm.cpython-310.pyc,, +setuptools/_distutils/command/__pycache__/bdist_wininst.cpython-310.pyc,, +setuptools/_distutils/command/__pycache__/build.cpython-310.pyc,, +setuptools/_distutils/command/__pycache__/build_clib.cpython-310.pyc,, +setuptools/_distutils/command/__pycache__/build_ext.cpython-310.pyc,, +setuptools/_distutils/command/__pycache__/build_py.cpython-310.pyc,, +setuptools/_distutils/command/__pycache__/build_scripts.cpython-310.pyc,, +setuptools/_distutils/command/__pycache__/check.cpython-310.pyc,, +setuptools/_distutils/command/__pycache__/clean.cpython-310.pyc,, +setuptools/_distutils/command/__pycache__/config.cpython-310.pyc,, +setuptools/_distutils/command/__pycache__/install.cpython-310.pyc,, +setuptools/_distutils/command/__pycache__/install_data.cpython-310.pyc,, +setuptools/_distutils/command/__pycache__/install_egg_info.cpython-310.pyc,, +setuptools/_distutils/command/__pycache__/install_headers.cpython-310.pyc,, +setuptools/_distutils/command/__pycache__/install_lib.cpython-310.pyc,, +setuptools/_distutils/command/__pycache__/install_scripts.cpython-310.pyc,, +setuptools/_distutils/command/__pycache__/py37compat.cpython-310.pyc,, 
+setuptools/_distutils/command/__pycache__/register.cpython-310.pyc,, +setuptools/_distutils/command/__pycache__/sdist.cpython-310.pyc,, +setuptools/_distutils/command/__pycache__/upload.cpython-310.pyc,, +setuptools/_distutils/command/bdist.py,sha256=2z4eudRl_n7m3lG9leL0IYqes4bsm8c0fxfZuiafjMg,5562 +setuptools/_distutils/command/bdist_dumb.py,sha256=BTur9jcIppyP7Piavjfsk7YjElqvxeYO2npUyPPOekc,4913 +setuptools/_distutils/command/bdist_msi.py,sha256=EVFQYN_X-ExeeP8gmdV9JcINsuUGsLJUz9afMU0Rt8c,35579 +setuptools/_distutils/command/bdist_rpm.py,sha256=gjOw22GhDSbcq0bdq25cTb-n6HWWm0bShLQad_mkJ4k,21537 +setuptools/_distutils/command/bdist_wininst.py,sha256=iGlaI-VfElHOneeczKHWnSN5a10-7IMcJaXuR1mdS3c,16030 +setuptools/_distutils/command/build.py,sha256=1AF-dxN_NlOEyoydBz19AwpeWYPSYCZvOLJSN_PdatY,5773 +setuptools/_distutils/command/build_clib.py,sha256=bgVTHh28eLQA2Gkw68amApd_j7qQBX4MTI-zTvAK_J4,8022 +setuptools/_distutils/command/build_ext.py,sha256=KgxpopuD6sqep0LsumMH15joWih0VdbnXpYm-ETNjoE,31612 +setuptools/_distutils/command/build_py.py,sha256=hXesMrH_epNj6K8SUtJdipgEis3EdICKeZ8VWe_ndck,16495 +setuptools/_distutils/command/build_scripts.py,sha256=urdn6wPxPMW5dLqpqFkZ8dqaFG1tf9TiAao6U9LCoEI,5963 +setuptools/_distutils/command/check.py,sha256=5qDtI75ccZg3sAItQWeaIu8y3FR314O4rr9Smz4HsEo,5637 +setuptools/_distutils/command/clean.py,sha256=2TCt47ru4hZZM0RfVfUYj5bbpicpGLP4Qhw5jBtvp9k,2776 +setuptools/_distutils/command/config.py,sha256=2aTjww3PwjMB8-ZibCe4P7B-qG1hM1gn_rJXYyxRz6c,13117 +setuptools/_distutils/command/install.py,sha256=zX_OITRItDnNAv0iVjXdFVitf3f63tHzK_mZ1sIxsuc,28970 +setuptools/_distutils/command/install_data.py,sha256=YhGOAwh3gJPqF7em5XA0rmpR42z1bLh80ooElzDyUvk,2822 +setuptools/_distutils/command/install_egg_info.py,sha256=WijZ7cHMAkNMMCwrZ--KoqV9M2RtLouU4-qSbiCwv70,2753 +setuptools/_distutils/command/install_headers.py,sha256=XQ6idkbIDfr1ljXCOznuVUMvOFpHBn6cK0Wz9gIM2b4,1298 
+setuptools/_distutils/command/install_lib.py,sha256=9AofR-MO9lAtjwwuukCptepOaJEKMZW2VHiyR5hU7HA,8397 +setuptools/_distutils/command/install_scripts.py,sha256=_CLUeQwGJRcY2kik7azPMn5IdtDCrjWdUvZ1khlG6ck,2017 +setuptools/_distutils/command/py37compat.py,sha256=qzRhhvTihqx_PZZt2ZYECxh1X3Oj255VqatzelYFAKw,671 +setuptools/_distutils/command/register.py,sha256=2jaq9968rt2puRVDBx1HbNiXv27uOk8idE_4lPf_3VM,11712 +setuptools/_distutils/command/sdist.py,sha256=qotJjAOzyhJjq2-oDImjNFrOtaSneEFDJTB-sEk1wnU,19005 +setuptools/_distutils/command/upload.py,sha256=BLO1w7eSAqsCjCLXtf_CRVSjwF1WmyOByGVGNdcQ8oY,7597 +setuptools/_distutils/config.py,sha256=dtHgblx9JhfyrKx1-J7Jlxw_f7s8ZbPFQii2UWMTZpY,4827 +setuptools/_distutils/core.py,sha256=0v7Emh9y0AW9o4AEjfVMhDxKzTFWFxUQn46spFSL56g,9282 +setuptools/_distutils/cygwinccompiler.py,sha256=MhRmF3G0-5doB6XqCuNCvHIXcgUva-OulDwJRAjZzHY,17330 +setuptools/_distutils/debug.py,sha256=N6MrTAqK6l9SVk6tWweR108PM8Ol7qNlfyV-nHcLhsY,139 +setuptools/_distutils/dep_util.py,sha256=GuR9Iw_jzZRkyemJ5HX8rB_wRGxkIBcBm1qh54r7zhk,3491 +setuptools/_distutils/dir_util.py,sha256=UwhBOUTcV65GTwce4SPuTXR8Z8q3LYEcmttqcGb0bYo,7778 +setuptools/_distutils/dist.py,sha256=Biuf6ca8uiFfMScRFsYUKtb5neMPtxKxRtXn50_1f3U,50421 +setuptools/_distutils/errors.py,sha256=Yr6tKZGdzBoNi53vBtiq0UJ__X05CmxSdQJqOWaw6SY,3577 +setuptools/_distutils/extension.py,sha256=bTb3Q0CoevGKYv5dX1ls--Ln8tlB0-UEOsi9BwzlZ-s,10515 +setuptools/_distutils/fancy_getopt.py,sha256=OPxp2CxHi1Yp_d1D8JxW4Ueq9fC71tegQFaafh58GGU,17784 +setuptools/_distutils/file_util.py,sha256=0hUqfItN_x2DVihR0MHdA4KCMVCOO8VoByaFp_a6MDg,8148 +setuptools/_distutils/filelist.py,sha256=Z9f5hvepZnpniZ2IFmCnWIjdviWozs8sbARBhWajwoM,13407 +setuptools/_distutils/log.py,sha256=hWBmdUC2K927QcVv3REMW3HMPclxccPQngxLSuUXQl0,1969 +setuptools/_distutils/msvc9compiler.py,sha256=X623B92g0v8A3BEM9qpRf396AEd_hfjkfDUVTKu0hcE,30453 +setuptools/_distutils/msvccompiler.py,sha256=qruALeGRq8-CjtjE2tLQ8W26QnchcYedWzFme8AxZ4Q,23540 
+setuptools/_distutils/py35compat.py,sha256=-sk1vBIsOgH-AobjIYbK_OEjdJF_54Ul_D1EiE9XM_c,455 +setuptools/_distutils/py38compat.py,sha256=II7ddBxOijC7uNN4z_46HYUjwYTJYMNiLJoGTormZm0,212 +setuptools/_distutils/spawn.py,sha256=4uE9k3VZWijxy7E_Rlcmh1MoamaPJ8rajdNBagKxjgU,3498 +setuptools/_distutils/sysconfig.py,sha256=k3fzINx3-qjge0udI6fC1UQSDPYpMGrxeSuV9cY4rmU,22151 +setuptools/_distutils/text_file.py,sha256=PsuAJeWdKJoLSV_6N6IpB5-0Pa84KzLUucJMFRazw3I,12483 +setuptools/_distutils/unixccompiler.py,sha256=u2Sfs6LRmqQux4nZW08GwDtoFMded6wYnkiaO2TvKC4,14538 +setuptools/_distutils/util.py,sha256=0v7B6nIsAXP11A7xqS6FC6lFAdaIqzxz_C-at4aMcgs,20655 +setuptools/_distutils/version.py,sha256=syRvPxuMQxnftpuIKeRE-2ELQ_ZMCwMJ-o8ie-lxdZo,13015 +setuptools/_distutils/versionpredicate.py,sha256=vx4ND3BtMgxFR9iZ4_t3WFa-NdIKxO8vtOd0twBppxc,5277 +setuptools/_imp.py,sha256=HmF91IbitRfsD5z-g4_wmcuH-RahyIONbPgiCOFgtzA,2392 +setuptools/_vendor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +setuptools/_vendor/__pycache__/__init__.cpython-310.pyc,, +setuptools/_vendor/__pycache__/ordered_set.cpython-310.pyc,, +setuptools/_vendor/__pycache__/pyparsing.cpython-310.pyc,, +setuptools/_vendor/more_itertools/__init__.py,sha256=C7sXffHTXM3P-iaLPPfqfmDoxOflQMJLcM7ed9p3jak,82 +setuptools/_vendor/more_itertools/__pycache__/__init__.cpython-310.pyc,, +setuptools/_vendor/more_itertools/__pycache__/more.cpython-310.pyc,, +setuptools/_vendor/more_itertools/__pycache__/recipes.cpython-310.pyc,, +setuptools/_vendor/more_itertools/more.py,sha256=DlZa8v6JihVwfQ5zHidOA-xDE0orcQIUyxVnCaUoDKE,117968 +setuptools/_vendor/more_itertools/recipes.py,sha256=UkNkrsZyqiwgLHANBTmvMhCvaNSvSNYhyOpz_Jc55DY,16256 +setuptools/_vendor/ordered_set.py,sha256=dbaCcs27dyN9gnMWGF5nA_BrVn6Q-NrjKYJpV9_fgBs,15130 +setuptools/_vendor/packaging/__about__.py,sha256=IIRHpOsJlJSgkjq1UoeBoMTqhvNp3gN9FyMb5Kf8El4,661 +setuptools/_vendor/packaging/__init__.py,sha256=b9Kk5MF7KxhhLgcDmiUWukN-LatWFxPdNug0joPhHSk,497 
+setuptools/_vendor/packaging/__pycache__/__about__.cpython-310.pyc,, +setuptools/_vendor/packaging/__pycache__/__init__.cpython-310.pyc,, +setuptools/_vendor/packaging/__pycache__/_manylinux.cpython-310.pyc,, +setuptools/_vendor/packaging/__pycache__/_musllinux.cpython-310.pyc,, +setuptools/_vendor/packaging/__pycache__/_structures.cpython-310.pyc,, +setuptools/_vendor/packaging/__pycache__/markers.cpython-310.pyc,, +setuptools/_vendor/packaging/__pycache__/requirements.cpython-310.pyc,, +setuptools/_vendor/packaging/__pycache__/specifiers.cpython-310.pyc,, +setuptools/_vendor/packaging/__pycache__/tags.cpython-310.pyc,, +setuptools/_vendor/packaging/__pycache__/utils.cpython-310.pyc,, +setuptools/_vendor/packaging/__pycache__/version.cpython-310.pyc,, +setuptools/_vendor/packaging/_manylinux.py,sha256=XcbiXB-qcjv3bcohp6N98TMpOP4_j3m-iOA8ptK2GWY,11488 +setuptools/_vendor/packaging/_musllinux.py,sha256=z5yeG1ygOPx4uUyLdqj-p8Dk5UBb5H_b0NIjW9yo8oA,4378 +setuptools/_vendor/packaging/_structures.py,sha256=TMiAgFbdUOPmIfDIfiHc3KFhSJ8kMjof2QS5I-2NyQ8,1629 +setuptools/_vendor/packaging/markers.py,sha256=lihRgqpZjLM-JW-vxlLPqU3kmVe79g9vypy1kxmTRuQ,8493 +setuptools/_vendor/packaging/requirements.py,sha256=Opd0FjqgdEiWkzBLyo1oLU0Dj01uIFwTAnAJQrr6j2A,4700 +setuptools/_vendor/packaging/specifiers.py,sha256=MZ-fYcNL3u7pNrt-6g2EQO7AbRXkjc-SPEYwXMQbLmc,30964 +setuptools/_vendor/packaging/tags.py,sha256=vGybAUQYlPKMcukzX_2e65fmafnFFuMbD25naYTEwtc,15710 +setuptools/_vendor/packaging/utils.py,sha256=dJjeat3BS-TYn1RrUFVwufUMasbtzLfYRoy_HXENeFQ,4200 +setuptools/_vendor/packaging/version.py,sha256=_fLRNrFrxYcHVfyo8vk9j8s6JM8N_xsSxVFr6RJyco8,14665 +setuptools/_vendor/pyparsing.py,sha256=tmrp-lu-qO1i75ZzIN5A12nKRRD1Cm4Vpk-5LR9rims,232055 +setuptools/archive_util.py,sha256=maJDbozRbDeSPw53VT0cb_IS3W0Ap73lJR8tX8RZDx0,7077 +setuptools/build_meta.py,sha256=hCU742vjgXHY6oKPYttBkie-n4DVNAJrUOgn0O_V3nc,10536 +setuptools/cli-32.exe,sha256=dfEuovMNnA2HLa3jRfMPVi5tk4R7alCbpTvuxtCyw0Y,65536 
+setuptools/cli-64.exe,sha256=KLABu5pyrnokJCv6skjXZ6GsXeyYHGcqOUT3oHI3Xpo,74752 +setuptools/cli-arm64.exe,sha256=o9amxowudZ98NvNWh_a2DRY8LhoIRqTAekxABqltiMc,137216 +setuptools/cli.exe,sha256=dfEuovMNnA2HLa3jRfMPVi5tk4R7alCbpTvuxtCyw0Y,65536 +setuptools/command/__init__.py,sha256=e-8TJOikUe3St0fw2b2p9u5EDdSxl5zHUBJJKifbcQ8,217 +setuptools/command/__pycache__/__init__.cpython-310.pyc,, +setuptools/command/__pycache__/alias.cpython-310.pyc,, +setuptools/command/__pycache__/bdist_egg.cpython-310.pyc,, +setuptools/command/__pycache__/bdist_rpm.cpython-310.pyc,, +setuptools/command/__pycache__/build_clib.cpython-310.pyc,, +setuptools/command/__pycache__/build_ext.cpython-310.pyc,, +setuptools/command/__pycache__/build_py.cpython-310.pyc,, +setuptools/command/__pycache__/develop.cpython-310.pyc,, +setuptools/command/__pycache__/dist_info.cpython-310.pyc,, +setuptools/command/__pycache__/easy_install.cpython-310.pyc,, +setuptools/command/__pycache__/egg_info.cpython-310.pyc,, +setuptools/command/__pycache__/install.cpython-310.pyc,, +setuptools/command/__pycache__/install_egg_info.cpython-310.pyc,, +setuptools/command/__pycache__/install_lib.cpython-310.pyc,, +setuptools/command/__pycache__/install_scripts.cpython-310.pyc,, +setuptools/command/__pycache__/py36compat.cpython-310.pyc,, +setuptools/command/__pycache__/register.cpython-310.pyc,, +setuptools/command/__pycache__/rotate.cpython-310.pyc,, +setuptools/command/__pycache__/saveopts.cpython-310.pyc,, +setuptools/command/__pycache__/sdist.cpython-310.pyc,, +setuptools/command/__pycache__/setopt.cpython-310.pyc,, +setuptools/command/__pycache__/test.cpython-310.pyc,, +setuptools/command/__pycache__/upload.cpython-310.pyc,, +setuptools/command/__pycache__/upload_docs.cpython-310.pyc,, +setuptools/command/alias.py,sha256=1sLQxZcNh6dDQpDmm4G7UGGTol83nY1NTPmNBbm2siI,2381 +setuptools/command/bdist_egg.py,sha256=-upiB6fFtm8cQSQj1LRDVpG1-T143DsXCvV0fh03u7U,16604 
+setuptools/command/bdist_rpm.py,sha256=PxrgoHPNaw2Pw2qNjjHDPC-Ay_IaDbCqP3d_5N-cj2A,1182 +setuptools/command/build_clib.py,sha256=fWHSFGkk10VCddBWCszvNhowbG9Z9CZXVjQ2uSInoOs,4415 +setuptools/command/build_ext.py,sha256=SNK042HfB2ezlDQbSVRGFqI1IM5A4AsjU1wpV3fgskE,13212 +setuptools/command/build_py.py,sha256=c90V1nVPEtYkdye-xvo-B48V5RLvSgD8JBMfPtUbtYw,8751 +setuptools/command/develop.py,sha256=5_Ss7ENd1_B_jVMY1tF5UV_y1Xu6jbVzAPG8oKeluGA,7012 +setuptools/command/dist_info.py,sha256=5t6kOfrdgALT-P3ogss6PF9k-Leyesueycuk3dUyZnI,960 +setuptools/command/easy_install.py,sha256=oXURojITuMmOQ2ZcOJ_IKkm5ahyoT5tnA89jZ70CTds,87973 +setuptools/command/egg_info.py,sha256=5rV9PH6Eeics9xkpzx-nsTBL54S1S-Bf0r1liCtYPVI,26134 +setuptools/command/install.py,sha256=UynjFBgRyyHrDZRVAmXrXG0vChJAMx-sxnOO3JoAzVo,4906 +setuptools/command/install_egg_info.py,sha256=4zq_Ad3jE-EffParuyDEnvxU6efB-Xhrzdr8aB6Ln_8,3195 +setuptools/command/install_lib.py,sha256=4zK0nihAAwMYIoOS0UOBLZKSOCBbXXPKsTraO_a8qmk,5036 +setuptools/command/install_scripts.py,sha256=o0jN_ex7yYYk8W5clymTFOXwkFMKzW9q_zd9Npcex7M,2593 +setuptools/command/launcher manifest.xml,sha256=xlLbjWrB01tKC0-hlVkOKkiSPbzMml2eOPtJ_ucCnbE,628 +setuptools/command/py36compat.py,sha256=7yLWzQj179Enx3pJ8V1cDDCzeLMFMd9XJXlK-iZTq5Y,4946 +setuptools/command/register.py,sha256=kk3DxXCb5lXTvqnhfwx2g6q7iwbUmgTyXUCaBooBOUk,468 +setuptools/command/rotate.py,sha256=SvsQPasezIojPjvMnfkqzh8P0U0tCj0daczF8uc3NQM,2128 +setuptools/command/saveopts.py,sha256=za7QCBcQimKKriWcoCcbhxPjUz30gSB74zuTL47xpP4,658 +setuptools/command/sdist.py,sha256=2onJidYBPFpUgcX6J4KjZX5ilwciHPRB8VkID5YVaL0,6413 +setuptools/command/setopt.py,sha256=okxhqD1NM1nQlbSVDCNv6P7Y7g680sc2r-tUW7wPH1Y,5086 +setuptools/command/test.py,sha256=qGY-Hx1RPCndlVh2rsrEs5479CgmxRsrEflVLr98jVA,8088 +setuptools/command/upload.py,sha256=XT3YFVfYPAmA5qhGg0euluU98ftxRUW-PzKcODMLxUs,462 +setuptools/command/upload_docs.py,sha256=ba5kOyedD_u62weinrxqqnvpuQvBIuamXehJG6tAvO0,7218 
+setuptools/config.py,sha256=O-T_28163qkEeaX8bLgqJLuOLYur15cC2_xpA0RENfM,23153 +setuptools/dep_util.py,sha256=BDx1BkzNQntvAB4alypHbW5UVBzjqths000PrUL4Zqc,949 +setuptools/depends.py,sha256=QYQIadr5DwLxPzkErhNt5hmRhvGhWxoXZMRXCm_jcQ0,5499 +setuptools/dist.py,sha256=73utfl0NHQ_Xfp5m3-wlbo7YaA31S_dkleh5P3GTKws,43162 +setuptools/errors.py,sha256=t4Rm85eXm71Ti0-PO1gAQMRK3V7NN3x1tcbcw0-xGSI,1555 +setuptools/extension.py,sha256=NMM46XjNdVelWemc0x8CyVKA5Ks6Zm3xTWSA2SS6xZM,1684 +setuptools/extern/__init__.py,sha256=Hhf9W73WAitw9TdRJfDIb6YFjmK56CF61afds1Mg0HY,2407 +setuptools/extern/__pycache__/__init__.cpython-310.pyc,, +setuptools/glob.py,sha256=1oZjbfjAHSXbgdhSuR6YGU8jKob9L8NtEmBYqcPTLYk,4873 +setuptools/gui-32.exe,sha256=XBr0bHMA6Hpz2s9s9Bzjl-PwXfa9nH4ie0rFn4V2kWA,65536 +setuptools/gui-64.exe,sha256=aYKMhX1IJLn4ULHgWX0sE0yREUt6B3TEHf_jOw6yNyE,75264 +setuptools/gui-arm64.exe,sha256=TEFnOKDi-mq3ZszxqbCoCXTnM_lhUWjdIqBpr6fVs40,137728 +setuptools/gui.exe,sha256=XBr0bHMA6Hpz2s9s9Bzjl-PwXfa9nH4ie0rFn4V2kWA,65536 +setuptools/installer.py,sha256=s6DQfsoICBJxbUqbduhOJtl1oG0S4yegRCg3EAs0i3M,3824 +setuptools/launch.py,sha256=TyPT-Ic1T2EnYvGO26gfNRP4ysBlrhpbRjQxWsiO414,812 +setuptools/monkey.py,sha256=0e3HdVKXHL415O7np-AUqhEFXPPuDdJKbI47chQ_DE4,5217 +setuptools/msvc.py,sha256=3LLt938e6OR7wWPzIvCQu7LCWZSIKqoKV6w3r8jV3kY,50561 +setuptools/namespaces.py,sha256=PMqGVPXPYQgjUTvEg9bGccRAkIODrQ6NmsDg_fwErwI,3093 +setuptools/package_index.py,sha256=egCu3CzZDtEwZL0psMfCkNJfkDryq1FgRkhFmr9rUPc,40103 +setuptools/py34compat.py,sha256=KYOd6ybRxjBW8NJmYD8t_UyyVmysppFXqHpFLdslGXU,245 +setuptools/sandbox.py,sha256=mR83i-mu-ZUU_7TaMgYCeRSyzkqv8loJ_GR9xhS2DDw,14348 +setuptools/script (dev).tmpl,sha256=RUzQzCQUaXtwdLtYHWYbIQmOaES5Brqq1FvUA_tu-5I,218 +setuptools/script.tmpl,sha256=WGTt5piezO27c-Dbx6l5Q4T3Ff20A5z7872hv3aAhYY,138 +setuptools/unicode_utils.py,sha256=aOOFo4JGwAsiBttGYDsqFS7YqWQeZ2j6DWiCuctR_00,941 +setuptools/version.py,sha256=og_cuZQb0QI6ukKZFfZWPlr1HgJBPPn2vO2m_bI9ZTE,144 
+setuptools/wheel.py,sha256=0P8tSk105uF_Ub-30N2HU2X2v7MKDSdjpeQlRRW3SkI,8288 +setuptools/windows_support.py,sha256=5GrfqSP2-dLGJoZTq2g6dCKkyQxxa2n5IQiXlJCoYEE,714 diff --git a/llmeval-env/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/REQUESTED b/llmeval-env/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..370203244ceae5a5b18aaa53f830adeb2b6bb795 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/top_level.txt @@ -0,0 +1,4 @@ +_distutils_hack +debian +pkg_resources +setuptools diff --git a/llmeval-env/share/man/man1/isympy.1 b/llmeval-env/share/man/man1/isympy.1 new file mode 100644 index 0000000000000000000000000000000000000000..0ff966158a28c5ad1a6cd954e454842b25fdd999 --- /dev/null +++ b/llmeval-env/share/man/man1/isympy.1 @@ -0,0 +1,188 @@ +'\" -*- coding: us-ascii -*- +.if \n(.g .ds T< \\FC +.if \n(.g .ds T> \\F[\n[.fam]] +.de URL +\\$2 \(la\\$1\(ra\\$3 +.. 
+.if \n(.g .mso www.tmac +.TH isympy 1 2007-10-8 "" "" +.SH NAME +isympy \- interactive shell for SymPy +.SH SYNOPSIS +'nh +.fi +.ad l +\fBisympy\fR \kx +.if (\nx>(\n(.l/2)) .nr x (\n(.l/5) +'in \n(.iu+\nxu +[\fB-c\fR | \fB--console\fR] [\fB-p\fR ENCODING | \fB--pretty\fR ENCODING] [\fB-t\fR TYPE | \fB--types\fR TYPE] [\fB-o\fR ORDER | \fB--order\fR ORDER] [\fB-q\fR | \fB--quiet\fR] [\fB-d\fR | \fB--doctest\fR] [\fB-C\fR | \fB--no-cache\fR] [\fB-a\fR | \fB--auto\fR] [\fB-D\fR | \fB--debug\fR] [ +-- | PYTHONOPTIONS] +'in \n(.iu-\nxu +.ad b +'hy +'nh +.fi +.ad l +\fBisympy\fR \kx +.if (\nx>(\n(.l/2)) .nr x (\n(.l/5) +'in \n(.iu+\nxu +[ +{\fB-h\fR | \fB--help\fR} +| +{\fB-v\fR | \fB--version\fR} +] +'in \n(.iu-\nxu +.ad b +'hy +.SH DESCRIPTION +isympy is a Python shell for SymPy. It is just a normal python shell +(ipython shell if you have the ipython package installed) that executes +the following commands so that you don't have to: +.PP +.nf +\*(T< +>>> from __future__ import division +>>> from sympy import * +>>> x, y, z = symbols("x,y,z") +>>> k, m, n = symbols("k,m,n", integer=True) + \*(T> +.fi +.PP +So starting isympy is equivalent to starting python (or ipython) and +executing the above commands by hand. It is intended for easy and quick +experimentation with SymPy. For more complicated programs, it is recommended +to write a script and import things explicitly (using the "from sympy +import sin, log, Symbol, ..." idiom). +.SH OPTIONS +.TP +\*(T<\fB\-c \fR\*(T>\fISHELL\fR, \*(T<\fB\-\-console=\fR\*(T>\fISHELL\fR +Use the specified shell (python or ipython) as +console backend instead of the default one (ipython +if present or python otherwise). + +Example: isympy -c python + +\fISHELL\fR could be either +\&'ipython' or 'python' +.TP +\*(T<\fB\-p \fR\*(T>\fIENCODING\fR, \*(T<\fB\-\-pretty=\fR\*(T>\fIENCODING\fR +Setup pretty printing in SymPy. By default, the most pretty, unicode +printing is enabled (if the terminal supports it). 
You can use less +pretty ASCII printing instead or no pretty printing at all. + +Example: isympy -p no + +\fIENCODING\fR must be one of 'unicode', +\&'ascii' or 'no'. +.TP +\*(T<\fB\-t \fR\*(T>\fITYPE\fR, \*(T<\fB\-\-types=\fR\*(T>\fITYPE\fR +Setup the ground types for the polys. By default, gmpy ground types +are used if gmpy2 or gmpy is installed, otherwise it falls back to python +ground types, which are a little bit slower. You can manually +choose python ground types even if gmpy is installed (e.g., for testing purposes). + +Note that sympy ground types are not supported, and should be used +only for experimental purposes. + +Note that the gmpy1 ground type is primarily intended for testing; it forces the +use of gmpy even if gmpy2 is available. + +This is the same as setting the environment variable +SYMPY_GROUND_TYPES to the given ground type (e.g., +SYMPY_GROUND_TYPES='gmpy') + +The ground types can be determined interactively from the variable +sympy.polys.domains.GROUND_TYPES inside the isympy shell itself. + +Example: isympy -t python + +\fITYPE\fR must be one of 'gmpy', +\&'gmpy1' or 'python'. +.TP +\*(T<\fB\-o \fR\*(T>\fIORDER\fR, \*(T<\fB\-\-order=\fR\*(T>\fIORDER\fR +Setup the ordering of terms for printing. The default is lex, which +orders terms lexicographically (e.g., x**2 + x + 1). You can choose +other orderings, such as rev-lex, which will use reverse +lexicographic ordering (e.g., 1 + x + x**2). + +Note that for very large expressions, ORDER='none' may speed up +printing considerably, with the tradeoff that the order of the terms +in the printed expression will have no canonical order. + +Example: isympy -o rev-lex + +\fIORDER\fR must be one of 'lex', 'rev-lex', 'grlex', +\&'rev-grlex', 'grevlex', 'rev-grevlex', 'old', or 'none'. +.TP +\*(T<\fB\-q\fR\*(T>, \*(T<\fB\-\-quiet\fR\*(T> +Print only Python's and SymPy's versions to stdout at startup, and nothing else. 
+.TP +\*(T<\fB\-d\fR\*(T>, \*(T<\fB\-\-doctest\fR\*(T> +Use the same format that should be used for doctests. This is +equivalent to '\fIisympy -c python -p no\fR'. +.TP +\*(T<\fB\-C\fR\*(T>, \*(T<\fB\-\-no\-cache\fR\*(T> +Disable the caching mechanism. Disabling the cache may slow certain +operations down considerably. This is useful for testing the cache, +or for benchmarking, as the cache can result in deceptive benchmark timings. + +This is the same as setting the environment variable SYMPY_USE_CACHE +to 'no'. +.TP +\*(T<\fB\-a\fR\*(T>, \*(T<\fB\-\-auto\fR\*(T> +Automatically create missing symbols. Normally, typing a name of a +Symbol that has not been instantiated first would raise NameError, +but with this option enabled, any undefined name will be +automatically created as a Symbol. This only works in IPython 0.11. + +Note that this is intended only for interactive, calculator style +usage. In a script that uses SymPy, Symbols should be instantiated +at the top, so that it's clear what they are. + +This will not override any names that are already defined, which +includes the single character letters represented by the mnemonic +QCOSINE (see the "Gotchas and Pitfalls" document in the +documentation). You can delete existing names by executing "del +name" in the shell itself. You can see if a name is defined by typing +"'name' in globals()". + +The Symbols that are created using this have default assumptions. +If you want to place assumptions on symbols, you should create them +using symbols() or var(). + +Finally, this only works in the top level namespace. So, for +example, if you define a function in isympy with an undefined +Symbol, it will not work. +.TP +\*(T<\fB\-D\fR\*(T>, \*(T<\fB\-\-debug\fR\*(T> +Enable debugging output. This is the same as setting the +environment variable SYMPY_DEBUG to 'True'. The debug status is set +in the variable SYMPY_DEBUG within isympy. 
+.TP +-- \fIPYTHONOPTIONS\fR +These options will be passed on to \fIipython (1)\fR shell. +Only supported when ipython is being used (standard python shell not supported). + +Two dashes (--) are required to separate \fIPYTHONOPTIONS\fR +from the other isympy options. + +For example, to run iSymPy without startup banner and colors: + +isympy -q -c ipython -- --colors=NoColor +.TP +\*(T<\fB\-h\fR\*(T>, \*(T<\fB\-\-help\fR\*(T> +Print help output and exit. +.TP +\*(T<\fB\-v\fR\*(T>, \*(T<\fB\-\-version\fR\*(T> +Print isympy version information and exit. +.SH FILES +.TP +\*(T<\fI${HOME}/.sympy\-history\fR\*(T> +Saves the history of commands when using the python +shell as backend. +.SH BUGS +The upstream's BTS can be found at \(lahttps://github.com/sympy/sympy/issues\(ra +Please report all bugs that you find in there; this will help improve +the overall quality of SymPy. +.SH "SEE ALSO" +\fBipython\fR(1), \fBpython\fR(1)