applied-ai-018 committed
Commit a1ea65c · verified · 1 Parent(s): 5bdbb9f

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. env-llmeval/bin/Activate.ps1 +247 -0
  2. env-llmeval/bin/accelerate +8 -0
  3. env-llmeval/bin/accelerate-config +8 -0
  4. env-llmeval/bin/accelerate-estimate-memory +8 -0
  5. env-llmeval/bin/accelerate-launch +8 -0
  6. env-llmeval/bin/activate +69 -0
  7. env-llmeval/bin/activate.csh +26 -0
  8. env-llmeval/bin/activate.fish +69 -0
  9. env-llmeval/bin/chardetect +8 -0
  10. env-llmeval/bin/convert-caffe2-to-onnx +8 -0
  11. env-llmeval/bin/convert-onnx-to-caffe2 +8 -0
  12. env-llmeval/bin/datasets-cli +8 -0
  13. env-llmeval/bin/evaluate-cli +8 -0
  14. env-llmeval/bin/get_gprof +75 -0
  15. env-llmeval/bin/get_objgraph +54 -0
  16. env-llmeval/bin/huggingface-cli +8 -0
  17. env-llmeval/bin/isympy +8 -0
  18. env-llmeval/bin/lm-eval +8 -0
  19. env-llmeval/bin/lm_eval +8 -0
  20. env-llmeval/bin/nltk +8 -0
  21. env-llmeval/bin/normalizer +8 -0
  22. env-llmeval/bin/pip +8 -0
  23. env-llmeval/bin/pip3 +8 -0
  24. env-llmeval/bin/pip3.10 +8 -0
  25. env-llmeval/bin/pybind11-config +8 -0
  26. env-llmeval/bin/sacrebleu +8 -0
  27. env-llmeval/bin/tabulate +8 -0
  28. env-llmeval/bin/torchrun +8 -0
  29. env-llmeval/bin/tqdm +8 -0
  30. env-llmeval/bin/transformers-cli +8 -0
  31. env-llmeval/bin/undill +22 -0
  32. env-llmeval/lib/python3.10/site-packages/accelerate/__init__.py +48 -0
  33. env-llmeval/lib/python3.10/site-packages/accelerate/checkpointing.py +275 -0
  34. env-llmeval/lib/python3.10/site-packages/accelerate/launchers.py +258 -0
  35. env-llmeval/lib/python3.10/site-packages/accelerate/local_sgd.py +102 -0
  36. env-llmeval/lib/python3.10/site-packages/accelerate/logging.py +123 -0
  37. env-llmeval/lib/python3.10/site-packages/accelerate/optimizer.py +193 -0
  38. env-llmeval/lib/python3.10/site-packages/accelerate/scheduler.py +98 -0
  39. env-llmeval/lib/python3.10/site-packages/accelerate/state.py +1202 -0
  40. env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/__init__.py +50 -0
  41. env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/__init__.cpython-310.pyc +0 -0
  42. env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/examples.cpython-310.pyc +0 -0
  43. env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/testing.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/training.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/examples.py +146 -0
  46. env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_cli.py +26 -0
  47. env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_distributed_data_loop.py +238 -0
  48. env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_sync.py +392 -0
  49. env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/testing.py +605 -0
  50. env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/training.py +101 -0
env-llmeval/bin/Activate.ps1 ADDED
@@ -0,0 +1,247 @@
+<#
+.Synopsis
+Activate a Python virtual environment for the current PowerShell session.
+
+.Description
+Pushes the python executable for a virtual environment to the front of the
+$Env:PATH environment variable and sets the prompt to signify that you are
+in a Python virtual environment. Makes use of the command line switches as
+well as the `pyvenv.cfg` file values present in the virtual environment.
+
+.Parameter VenvDir
+Path to the directory that contains the virtual environment to activate. The
+default value for this is the parent of the directory that the Activate.ps1
+script is located within.
+
+.Parameter Prompt
+The prompt prefix to display when this virtual environment is activated. By
+default, this prompt is the name of the virtual environment folder (VenvDir)
+surrounded by parentheses and followed by a single space (ie. '(.venv) ').
+
+.Example
+Activate.ps1
+Activates the Python virtual environment that contains the Activate.ps1 script.
+
+.Example
+Activate.ps1 -Verbose
+Activates the Python virtual environment that contains the Activate.ps1 script,
+and shows extra information about the activation as it executes.
+
+.Example
+Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
+Activates the Python virtual environment located in the specified location.
+
+.Example
+Activate.ps1 -Prompt "MyPython"
+Activates the Python virtual environment that contains the Activate.ps1 script,
+and prefixes the current prompt with the specified string (surrounded in
+parentheses) while the virtual environment is active.
+
+.Notes
+On Windows, it may be required to enable this Activate.ps1 script by setting the
+execution policy for the user. You can do this by issuing the following PowerShell
+command:
+
+PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
+
+For more information on Execution Policies:
+https://go.microsoft.com/fwlink/?LinkID=135170
+
+#>
+Param(
+    [Parameter(Mandatory = $false)]
+    [String]
+    $VenvDir,
+    [Parameter(Mandatory = $false)]
+    [String]
+    $Prompt
+)
+
+<# Function declarations --------------------------------------------------- #>
+
+<#
+.Synopsis
+Remove all shell session elements added by the Activate script, including the
+addition of the virtual environment's Python executable from the beginning of
+the PATH variable.
+
+.Parameter NonDestructive
+If present, do not remove this function from the global namespace for the
+session.
+
+#>
+function global:deactivate ([switch]$NonDestructive) {
+    # Revert to original values
+
+    # The prior prompt:
+    if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
+        Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
+        Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
+    }
+
+    # The prior PYTHONHOME:
+    if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
+        Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
+        Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
+    }
+
+    # The prior PATH:
+    if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
+        Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
+        Remove-Item -Path Env:_OLD_VIRTUAL_PATH
+    }
+
+    # Just remove the VIRTUAL_ENV altogether:
+    if (Test-Path -Path Env:VIRTUAL_ENV) {
+        Remove-Item -Path env:VIRTUAL_ENV
+    }
+
+    # Just remove VIRTUAL_ENV_PROMPT altogether.
+    if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) {
+        Remove-Item -Path env:VIRTUAL_ENV_PROMPT
+    }
+
+    # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
+    if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
+        Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
+    }
+
+    # Leave deactivate function in the global namespace if requested:
+    if (-not $NonDestructive) {
+        Remove-Item -Path function:deactivate
+    }
+}
+
+<#
+.Description
+Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
+given folder, and returns them in a map.
+
+For each line in the pyvenv.cfg file, if that line can be parsed into exactly
+two strings separated by `=` (with any amount of whitespace surrounding the =)
+then it is considered a `key = value` line. The left hand string is the key,
+the right hand is the value.
+
+If the value starts with a `'` or a `"` then the first and last character is
+stripped from the value before being captured.
+
+.Parameter ConfigDir
+Path to the directory that contains the `pyvenv.cfg` file.
+#>
+function Get-PyVenvConfig(
+    [String]
+    $ConfigDir
+) {
+    Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"
+
+    # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
+    $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue
+
+    # An empty map will be returned if no config file is found.
+    $pyvenvConfig = @{ }
+
+    if ($pyvenvConfigPath) {
+
+        Write-Verbose "File exists, parse `key = value` lines"
+        $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath
+
+        $pyvenvConfigContent | ForEach-Object {
+            $keyval = $PSItem -split "\s*=\s*", 2
+            if ($keyval[0] -and $keyval[1]) {
+                $val = $keyval[1]
+
+                # Remove extraneous quotations around a string value.
+                if ("'""".Contains($val.Substring(0, 1))) {
+                    $val = $val.Substring(1, $val.Length - 2)
+                }
+
+                $pyvenvConfig[$keyval[0]] = $val
+                Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
+            }
+        }
+    }
+    return $pyvenvConfig
+}
+
+
+<# Begin Activate script --------------------------------------------------- #>
+
+# Determine the containing directory of this script
+$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
+$VenvExecDir = Get-Item -Path $VenvExecPath
+
+Write-Verbose "Activation script is located in path: '$VenvExecPath'"
+Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)"
+Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)"
+
+# Set values required in priority: CmdLine, ConfigFile, Default
+# First, get the location of the virtual environment, it might not be
+# VenvExecDir if specified on the command line.
+if ($VenvDir) {
+    Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
+}
+else {
+    Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
+    $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
+    Write-Verbose "VenvDir=$VenvDir"
+}
+
+# Next, read the `pyvenv.cfg` file to determine any required value such
+# as `prompt`.
+$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir
+
+# Next, set the prompt from the command line, or the config file, or
+# just use the name of the virtual environment folder.
+if ($Prompt) {
+    Write-Verbose "Prompt specified as argument, using '$Prompt'"
+}
+else {
+    Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
+    if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
+        Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
+        $Prompt = $pyvenvCfg['prompt'];
+    }
+    else {
+        Write-Verbose " Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)"
+        Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
+        $Prompt = Split-Path -Path $venvDir -Leaf
+    }
+}
+
+Write-Verbose "Prompt = '$Prompt'"
+Write-Verbose "VenvDir='$VenvDir'"
+
+# Deactivate any currently active virtual environment, but leave the
+# deactivate function in place.
+deactivate -nondestructive
+
+# Now set the environment variable VIRTUAL_ENV, used by many tools to determine
+# that there is an activated venv.
+$env:VIRTUAL_ENV = $VenvDir
+
+if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {
+
+    Write-Verbose "Setting prompt to '$Prompt'"
+
+    # Set the prompt to include the env name
+    # Make sure _OLD_VIRTUAL_PROMPT is global
+    function global:_OLD_VIRTUAL_PROMPT { "" }
+    Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
+    New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt
+
+    function global:prompt {
+        Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
+        _OLD_VIRTUAL_PROMPT
+    }
+    $env:VIRTUAL_ENV_PROMPT = $Prompt
+}
+
+# Clear PYTHONHOME
+if (Test-Path -Path Env:PYTHONHOME) {
+    Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
+    Remove-Item -Path Env:PYTHONHOME
+}
+
+# Add the venv to the PATH
+Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
+$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"
env-llmeval/bin/accelerate ADDED
@@ -0,0 +1,8 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from accelerate.commands.accelerate_cli import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
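Each `bin/` wrapper in this commit follows the same pip-style console-script pattern: a shebang pinning the venv interpreter, a `re.sub` that strips a Windows-style `-script.pyw`/`.exe` suffix from `argv[0]`, and a call into the package's entry point. A minimal sketch of the pattern (the `mytool` name and `mypkg.cli.main` entry point are hypothetical, not part of this commit):

```python
#!/usr/bin/env python3
# Hypothetical console-script wrapper mirroring the files under env-llmeval/bin/.
import re
import sys

from mypkg.cli import main  # hypothetical entry point

if __name__ == '__main__':
    # Normalize argv[0] so the tool reports a consistent name even when
    # invoked as 'mytool-script.pyw' or 'mytool.exe' on Windows.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
```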
env-llmeval/bin/accelerate-config ADDED
@@ -0,0 +1,8 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from accelerate.commands.config import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
env-llmeval/bin/accelerate-estimate-memory ADDED
@@ -0,0 +1,8 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from accelerate.commands.estimate import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
env-llmeval/bin/accelerate-launch ADDED
@@ -0,0 +1,8 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from accelerate.commands.launch import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
env-llmeval/bin/activate ADDED
@@ -0,0 +1,69 @@
+# This file must be used with "source bin/activate" *from bash*
+# you cannot run it directly
+
+deactivate () {
+    # reset old environment variables
+    if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
+        PATH="${_OLD_VIRTUAL_PATH:-}"
+        export PATH
+        unset _OLD_VIRTUAL_PATH
+    fi
+    if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
+        PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
+        export PYTHONHOME
+        unset _OLD_VIRTUAL_PYTHONHOME
+    fi
+
+    # This should detect bash and zsh, which have a hash command that must
+    # be called to get it to forget past commands. Without forgetting
+    # past commands the $PATH changes we made may not be respected
+    if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
+        hash -r 2> /dev/null
+    fi
+
+    if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
+        PS1="${_OLD_VIRTUAL_PS1:-}"
+        export PS1
+        unset _OLD_VIRTUAL_PS1
+    fi
+
+    unset VIRTUAL_ENV
+    unset VIRTUAL_ENV_PROMPT
+    if [ ! "${1:-}" = "nondestructive" ] ; then
+        # Self destruct!
+        unset -f deactivate
+    fi
+}
+
+# unset irrelevant variables
+deactivate nondestructive
+
+VIRTUAL_ENV="/home/sdp/llm_eval/env-llmeval"
+export VIRTUAL_ENV
+
+_OLD_VIRTUAL_PATH="$PATH"
+PATH="$VIRTUAL_ENV/bin:$PATH"
+export PATH
+
+# unset PYTHONHOME if set
+# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
+# could use `if (set -u; : $PYTHONHOME) ;` in bash
+if [ -n "${PYTHONHOME:-}" ] ; then
+    _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
+    unset PYTHONHOME
+fi
+
+if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
+    _OLD_VIRTUAL_PS1="${PS1:-}"
+    PS1="(env-llmeval) ${PS1:-}"
+    export PS1
+    VIRTUAL_ENV_PROMPT="(env-llmeval) "
+    export VIRTUAL_ENV_PROMPT
+fi
+
+# This should detect bash and zsh, which have a hash command that must
+# be called to get it to forget past commands. Without forgetting
+# past commands the $PATH changes we made may not be respected
+if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
+    hash -r 2> /dev/null
+fi
env-llmeval/bin/activate.csh ADDED
@@ -0,0 +1,26 @@
+# This file must be used with "source bin/activate.csh" *from csh*.
+# You cannot run it directly.
+# Created by Davide Di Blasi <[email protected]>.
+# Ported to Python 3.3 venv by Andrew Svetlov <[email protected]>
+
+alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate'
+
+# Unset irrelevant variables.
+deactivate nondestructive
+
+setenv VIRTUAL_ENV "/home/sdp/llm_eval/env-llmeval"
+
+set _OLD_VIRTUAL_PATH="$PATH"
+setenv PATH "$VIRTUAL_ENV/bin:$PATH"
+
+
+set _OLD_VIRTUAL_PROMPT="$prompt"
+
+if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
+    set prompt = "(env-llmeval) $prompt"
+    setenv VIRTUAL_ENV_PROMPT "(env-llmeval) "
+endif
+
+alias pydoc python -m pydoc
+
+rehash
env-llmeval/bin/activate.fish ADDED
@@ -0,0 +1,69 @@
+# This file must be used with "source <venv>/bin/activate.fish" *from fish*
+# (https://fishshell.com/); you cannot run it directly.
+
+function deactivate -d "Exit virtual environment and return to normal shell environment"
+    # reset old environment variables
+    if test -n "$_OLD_VIRTUAL_PATH"
+        set -gx PATH $_OLD_VIRTUAL_PATH
+        set -e _OLD_VIRTUAL_PATH
+    end
+    if test -n "$_OLD_VIRTUAL_PYTHONHOME"
+        set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
+        set -e _OLD_VIRTUAL_PYTHONHOME
+    end
+
+    if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
+        set -e _OLD_FISH_PROMPT_OVERRIDE
+        # prevents error when using nested fish instances (Issue #93858)
+        if functions -q _old_fish_prompt
+            functions -e fish_prompt
+            functions -c _old_fish_prompt fish_prompt
+            functions -e _old_fish_prompt
+        end
+    end
+
+    set -e VIRTUAL_ENV
+    set -e VIRTUAL_ENV_PROMPT
+    if test "$argv[1]" != "nondestructive"
+        # Self-destruct!
+        functions -e deactivate
+    end
+end
+
+# Unset irrelevant variables.
+deactivate nondestructive
+
+set -gx VIRTUAL_ENV "/home/sdp/llm_eval/env-llmeval"
+
+set -gx _OLD_VIRTUAL_PATH $PATH
+set -gx PATH "$VIRTUAL_ENV/bin" $PATH
+
+# Unset PYTHONHOME if set.
+if set -q PYTHONHOME
+    set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
+    set -e PYTHONHOME
+end
+
+if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
+    # fish uses a function instead of an env var to generate the prompt.
+
+    # Save the current fish_prompt function as the function _old_fish_prompt.
+    functions -c fish_prompt _old_fish_prompt
+
+    # With the original prompt function renamed, we can override with our own.
+    function fish_prompt
+        # Save the return status of the last command.
+        set -l old_status $status
+
+        # Output the venv prompt; color taken from the blue of the Python logo.
+        printf "%s%s%s" (set_color 4B8BBE) "(env-llmeval) " (set_color normal)
+
+        # Restore the return status of the previous command.
+        echo "exit $old_status" | .
+        # Output the original/"old" prompt.
+        _old_fish_prompt
+    end
+
+    set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
+    set -gx VIRTUAL_ENV_PROMPT "(env-llmeval) "
+end
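The three activate scripts above (bash, csh, fish) all advertise the active environment the same way: they export `VIRTUAL_ENV` and prepend `$VIRTUAL_ENV/bin` to `PATH`. A small Python sketch of how a tool can detect this convention (the check is a common idiom, not part of these scripts):

```python
# Sketch: detect an activated venv via the VIRTUAL_ENV variable that the
# activate scripts above export.
import os
import sys

venv = os.environ.get("VIRTUAL_ENV")
if venv:
    print(f"Active virtual environment: {venv}")
# sys.prefix also points into the venv once its interpreter is running.
print(f"Interpreter prefix: {sys.prefix}")
```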
env-llmeval/bin/chardetect ADDED
@@ -0,0 +1,8 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from chardet.cli.chardetect import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
env-llmeval/bin/convert-caffe2-to-onnx ADDED
@@ -0,0 +1,8 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from caffe2.python.onnx.bin.conversion import caffe2_to_onnx
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(caffe2_to_onnx())
env-llmeval/bin/convert-onnx-to-caffe2 ADDED
@@ -0,0 +1,8 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from caffe2.python.onnx.bin.conversion import onnx_to_caffe2
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(onnx_to_caffe2())
env-llmeval/bin/datasets-cli ADDED
@@ -0,0 +1,8 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from datasets.commands.datasets_cli import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
env-llmeval/bin/evaluate-cli ADDED
@@ -0,0 +1,8 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from evaluate.commands.evaluate_cli import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
env-llmeval/bin/get_gprof ADDED
@@ -0,0 +1,75 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2008-2016 California Institute of Technology.
+# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+# - https://github.com/uqfoundation/dill/blob/master/LICENSE
+'''
+build profile graph for the given instance
+
+running:
+  $ get_gprof <args> <instance>
+
+executes:
+  gprof2dot -f pstats <args> <type>.prof | dot -Tpng -o <type>.call.png
+
+where:
+  <args> are arguments for gprof2dot, such as "-n 5 -e 5"
+  <instance> is code to create the instance to profile
+  <type> is the class of the instance (i.e. type(instance))
+
+For example:
+  $ get_gprof -n 5 -e 1 "import numpy; numpy.array([1,2])"
+
+will create 'ndarray.call.png' with the profile graph for numpy.array([1,2]),
+where '-n 5' eliminates nodes below 5% threshold, similarly '-e 1' eliminates
+edges below 1% threshold
+'''
+
+if __name__ == "__main__":
+    import sys
+    if len(sys.argv) < 2:
+        print ("Please provide an object instance (e.g. 'import math; math.pi')")
+        sys.exit()
+    # grab args for gprof2dot
+    args = sys.argv[1:-1]
+    args = ' '.join(args)
+    # last arg builds the object
+    obj = sys.argv[-1]
+    obj = obj.split(';')
+    # multi-line prep for generating an instance
+    for line in obj[:-1]:
+        exec(line)
+    # one-line generation of an instance
+    try:
+        obj = eval(obj[-1])
+    except Exception:
+        print ("Error processing object instance")
+        sys.exit()
+
+    # get object 'name'
+    objtype = type(obj)
+    name = getattr(objtype, '__name__', getattr(objtype, '__class__', objtype))
+
+    # profile dumping an object
+    import dill
+    import os
+    import cProfile
+    #name = os.path.splitext(os.path.basename(__file__))[0]
+    cProfile.run("dill.dumps(obj)", filename="%s.prof" % name)
+    msg = "gprof2dot -f pstats %s %s.prof | dot -Tpng -o %s.call.png" % (args, name, name)
+    try:
+        res = os.system(msg)
+    except Exception:
+        print ("Please verify install of 'gprof2dot' to view profile graphs")
+    if res:
+        print ("Please verify install of 'gprof2dot' to view profile graphs")
+
+    # get stats
+    f_prof = "%s.prof" % name
+    import pstats
+    stats = pstats.Stats(f_prof, stream=sys.stdout)
+    stats.strip_dirs().sort_stats('cumtime')
+    stats.print_stats(20) #XXX: save to file instead of print top 20?
+    os.remove(f_prof)
env-llmeval/bin/get_objgraph ADDED
@@ -0,0 +1,54 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2008-2016 California Institute of Technology.
+# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+# - https://github.com/uqfoundation/dill/blob/master/LICENSE
+"""
+display the reference paths for objects in ``dill.types`` or a .pkl file
+
+Notes:
+    the generated image is useful in showing the pointer references in
+    objects that are or can be pickled. Any object in ``dill.objects``
+    listed in ``dill.load_types(picklable=True, unpicklable=True)`` works.
+
+Examples::
+
+    $ get_objgraph ArrayType
+    Image generated as ArrayType.png
+"""
+
+import dill as pickle
+#pickle.debug.trace(True)
+#import pickle
+
+# get all objects for testing
+from dill import load_types
+load_types(pickleable=True,unpickleable=True)
+from dill import objects
+
+if __name__ == "__main__":
+    import sys
+    if len(sys.argv) != 2:
+        print ("Please provide exactly one file or type name (e.g. 'IntType')")
+        msg = "\n"
+        for objtype in list(objects.keys())[:40]:
+            msg += objtype + ', '
+        print (msg + "...")
+    else:
+        objtype = str(sys.argv[-1])
+        try:
+            obj = objects[objtype]
+        except KeyError:
+            obj = pickle.load(open(objtype,'rb'))
+            import os
+            objtype = os.path.splitext(objtype)[0]
+        try:
+            import objgraph
+            objgraph.show_refs(obj, filename=objtype+'.png')
+        except ImportError:
+            print ("Please install 'objgraph' to view object graphs")
+
+
+# EOF
env-llmeval/bin/huggingface-cli ADDED
@@ -0,0 +1,8 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from huggingface_hub.commands.huggingface_cli import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
env-llmeval/bin/isympy ADDED
@@ -0,0 +1,8 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from isympy import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
env-llmeval/bin/lm-eval ADDED
@@ -0,0 +1,8 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from lm_eval.__main__ import cli_evaluate
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(cli_evaluate())
env-llmeval/bin/lm_eval ADDED
@@ -0,0 +1,8 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from lm_eval.__main__ import cli_evaluate
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(cli_evaluate())
env-llmeval/bin/nltk ADDED
@@ -0,0 +1,8 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from nltk.cli import cli
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(cli())
env-llmeval/bin/normalizer ADDED
@@ -0,0 +1,8 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from charset_normalizer.cli import cli_detect
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(cli_detect())
env-llmeval/bin/pip ADDED
@@ -0,0 +1,8 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from pip._internal.cli.main import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
env-llmeval/bin/pip3 ADDED
@@ -0,0 +1,8 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from pip._internal.cli.main import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
env-llmeval/bin/pip3.10 ADDED
@@ -0,0 +1,8 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from pip._internal.cli.main import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
env-llmeval/bin/pybind11-config ADDED
@@ -0,0 +1,8 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from pybind11.__main__ import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
env-llmeval/bin/sacrebleu ADDED
@@ -0,0 +1,8 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from sacrebleu.sacrebleu import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
env-llmeval/bin/tabulate ADDED
@@ -0,0 +1,8 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from tabulate import _main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(_main())
env-llmeval/bin/torchrun ADDED
@@ -0,0 +1,8 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from torch.distributed.run import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
env-llmeval/bin/tqdm ADDED
@@ -0,0 +1,8 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from tqdm.cli import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
env-llmeval/bin/transformers-cli ADDED
@@ -0,0 +1,8 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from transformers.commands.transformers_cli import main
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
env-llmeval/bin/undill ADDED
@@ -0,0 +1,22 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+#
+# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
+# Copyright (c) 2008-2016 California Institute of Technology.
+# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation.
+# License: 3-clause BSD. The full license text is available at:
+# - https://github.com/uqfoundation/dill/blob/master/LICENSE
+"""
+unpickle the contents of a pickled object file
+
+Examples::
+
+    $ undill hello.pkl
+    ['hello', 'world']
+"""
+
+if __name__ == '__main__':
+    import sys
+    import dill
+    for file in sys.argv[1:]:
+        print (dill.load(open(file,'rb')))
+
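The `undill` docstring above assumes a pickle written with `dill`; a minimal sketch of producing the `hello.pkl` file from its example:

```python
# Sketch: write a dill pickle that `undill hello.pkl` would print back.
import dill

with open('hello.pkl', 'wb') as f:
    dill.dump(['hello', 'world'], f)
```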
env-llmeval/lib/python3.10/site-packages/accelerate/__init__.py ADDED
@@ -0,0 +1,48 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+__version__ = "0.29.2"
+
+from .accelerator import Accelerator
+from .big_modeling import (
+    cpu_offload,
+    cpu_offload_with_hook,
+    disk_offload,
+    dispatch_model,
+    init_empty_weights,
+    init_on_device,
+    load_checkpoint_and_dispatch,
+)
+from .data_loader import skip_first_batches
+from .inference import prepare_pippy
+from .launchers import debug_launcher, notebook_launcher
+from .state import PartialState
+from .utils import (
+    AutocastKwargs,
+    DataLoaderConfiguration,
+    DeepSpeedPlugin,
+    DistributedDataParallelKwargs,
+    DistributedType,
+    FullyShardedDataParallelPlugin,
+    GradScalerKwargs,
+    InitProcessGroupKwargs,
+    find_executable_batch_size,
+    infer_auto_device_map,
+    is_rich_available,
+    load_checkpoint_in_model,
+    synchronize_rng_states,
+)
+
+
+if is_rich_available():
+    from .utils import rich
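The re-exports above define `accelerate`'s public API. A minimal training-loop sketch using the `Accelerator` entry point (assumes `model`, `optimizer`, `dataloader`, and `loss_fn` are defined elsewhere; none of them come from this commit):

```python
# Sketch of the canonical Accelerator usage; the names below are assumed.
from accelerate import Accelerator

accelerator = Accelerator()
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for batch, targets in dataloader:
    optimizer.zero_grad()
    loss = loss_fn(model(batch), targets)
    accelerator.backward(loss)  # replaces loss.backward()
    optimizer.step()
```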
env-llmeval/lib/python3.10/site-packages/accelerate/checkpointing.py ADDED
@@ -0,0 +1,275 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import random
+from pathlib import Path
+from typing import List
+
+import numpy as np
+import torch
+from safetensors.torch import load_file
+from torch.cuda.amp import GradScaler
+
+from .utils import (
+    MODEL_NAME,
+    OPTIMIZER_NAME,
+    RNG_STATE_NAME,
+    SAFE_MODEL_NAME,
+    SAFE_WEIGHTS_NAME,
+    SAMPLER_NAME,
+    SCALER_NAME,
+    SCHEDULER_NAME,
+    WEIGHTS_NAME,
+    get_pretty_name,
+    is_torch_xla_available,
+    is_xpu_available,
+    save,
+)
+
+
+if is_torch_xla_available():
+    import torch_xla.core.xla_model as xm
+
+from .logging import get_logger
+from .state import PartialState
+
+
+logger = get_logger(__name__)
+
+
+def save_accelerator_state(
+    output_dir: str,
+    model_states: List[dict],
+    optimizers: list,
+    schedulers: list,
+    dataloaders: list,
+    process_index: int,
+    scaler: GradScaler = None,
+    save_on_each_node: bool = False,
+    safe_serialization: bool = True,
+):
+    """
+    Saves the current states of the models, optimizers, scaler, and RNG generators to a given directory.
+
+    <Tip>
+
+    If `safe_serialization` is `True`, models will be saved with `safetensors` while the rest are saved using native
+    `pickle`.
+
+    </Tip>
+
+    Args:
+        output_dir (`str` or `os.PathLike`):
+            The name of the folder to save all relevant weights and states.
+        model_states (`List[torch.nn.Module]`):
+            A list of model states
+        optimizers (`List[torch.optim.Optimizer]`):
+            A list of optimizer instances
+        schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`):
+            A list of learning rate schedulers
+        dataloaders (`List[torch.utils.data.DataLoader]`):
+            A list of dataloader instances to save their sampler states
+        process_index (`int`):
+            The current process index in the Accelerator state
+        scaler (`torch.cuda.amp.GradScaler`, *optional*):
+            An optional gradient scaler instance to save
+        save_on_each_node (`bool`, *optional*):
+            Whether to save on every node, or only the main node.
+        safe_serialization (`bool`, *optional*, defaults to `True`):
+            Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+    """
+    output_dir = Path(output_dir)
+    # Model states
+    for i, state in enumerate(model_states):
+        weights_name = WEIGHTS_NAME if not safe_serialization else SAFE_WEIGHTS_NAME
+        if i > 0:
+            weights_name = weights_name.replace(".", f"_{i}.")
+        output_model_file = output_dir.joinpath(weights_name)
+        save(state, output_model_file, save_on_each_node=save_on_each_node, safe_serialization=safe_serialization)
+        logger.info(f"Model weights saved in {output_model_file}")
+    # Optimizer states
+    for i, opt in enumerate(optimizers):
+        state = opt.state_dict()
+        optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin"
+        output_optimizer_file = output_dir.joinpath(optimizer_name)
+        save(state, output_optimizer_file, save_on_each_node=save_on_each_node, safe_serialization=False)
+        logger.info(f"Optimizer state saved in {output_optimizer_file}")
+    # Scheduler states
+    for i, scheduler in enumerate(schedulers):
+        state = scheduler.state_dict()
+        scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin"
+        output_scheduler_file = output_dir.joinpath(scheduler_name)
+        save(state, output_scheduler_file, save_on_each_node=save_on_each_node, safe_serialization=False)
+        logger.info(f"Scheduler state saved in {output_scheduler_file}")
+    # DataLoader states
+    for i, dataloader in enumerate(dataloaders):
+        sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin"
+        output_sampler_file = output_dir.joinpath(sampler_name)
+        # Only save if we have our custom sampler
+        from .data_loader import IterableDatasetShard, SeedableRandomSampler
+
+        if isinstance(dataloader.dataset, IterableDatasetShard):
+            sampler = dataloader.sampler.sampler
+
+            if isinstance(sampler, SeedableRandomSampler):
+                save(sampler, output_sampler_file, save_on_each_node=save_on_each_node, safe_serialization=False)
+                logger.info(f"Sampler state for dataloader {i} saved in {output_sampler_file}")
+
+    # GradScaler state
+    if scaler is not None:
+        state = scaler.state_dict()
+        output_scaler_file = output_dir.joinpath(SCALER_NAME)
+        torch.save(state, output_scaler_file)
+        logger.info(f"Gradient scaler state saved in {output_scaler_file}")
+    # Random number generator states
+    states = {}
+    states_name = f"{RNG_STATE_NAME}_{process_index}.pkl"
+    states["random_state"] = random.getstate()
+    states["numpy_random_seed"] = np.random.get_state()
+    states["torch_manual_seed"] = torch.get_rng_state()
+    if is_xpu_available():
+        states["torch_xpu_manual_seed"] = torch.xpu.get_rng_state_all()
+    else:
+        states["torch_cuda_manual_seed"] = torch.cuda.get_rng_state_all()
+    if is_torch_xla_available():
+        states["xm_seed"] = xm.get_rng_state()
+    output_states_file = output_dir.joinpath(states_name)
+    torch.save(states, output_states_file)
+    logger.info(f"Random states saved in {output_states_file}")
+    return output_dir
+
+
+def load_accelerator_state(
+    input_dir,
+    models,
+    optimizers,
+    schedulers,
+    dataloaders,
+    process_index,
+    scaler=None,
+    map_location=None,
+    **load_model_func_kwargs,
+):
+    """
+    Loads states of the models, optimizers, scaler, and RNG generators from a given directory.
+
+    Args:
+        input_dir (`str` or `os.PathLike`):
+            The name of the folder to load all relevant weights and states.
+        models (`List[torch.nn.Module]`):
+            A list of model instances
+        optimizers (`List[torch.optim.Optimizer]`):
+            A list of optimizer instances
+        schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`):
+            A list of learning rate schedulers
+        process_index (`int`):
+            The current process index in the Accelerator state
+        scaler (`torch.cuda.amp.GradScaler`, *optional*):
+            An optional *GradScaler* instance to load
+        map_location (`str`, *optional*):
+            What device to load the optimizer state onto. Should be one of either "cpu" or "on_device".
+        load_model_func_kwargs (`dict`, *optional*):
+            Additional arguments that can be passed to the model's `load_state_dict` method.
+    """
+    if map_location not in [None, "cpu", "on_device"]:
+        raise TypeError(
+            "Unsupported optimizer map location passed, please choose one of `None`, `'cpu'`, or `'on_device'`"
+        )
+    if map_location is None:
+        map_location = "cpu"
+    elif map_location == "on_device":
+        map_location = PartialState().device
+
+    input_dir = Path(input_dir)
+    # Model states
+    for i, model in enumerate(models):
+        ending = f"_{i}" if i > 0 else ""
+        input_model_file = input_dir.joinpath(f"{SAFE_MODEL_NAME}{ending}.safetensors")
+        if input_model_file.exists():
+            state_dict = load_file(input_model_file, device=str(map_location))
+        else:
+            # Load with torch
+            input_model_file = input_dir.joinpath(f"{MODEL_NAME}{ending}.bin")
+            state_dict = torch.load(input_model_file, map_location=map_location)
+        models[i].load_state_dict(state_dict, **load_model_func_kwargs)
+    logger.info("All model weights loaded successfully")
+
+    # Optimizer states
+    for i, opt in enumerate(optimizers):
+        optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin"
+        input_optimizer_file = input_dir.joinpath(optimizer_name)
+        optimizer_state = torch.load(input_optimizer_file, map_location=map_location)
+        optimizers[i].load_state_dict(optimizer_state)
+    logger.info("All optimizer states loaded successfully")
+
+    # Scheduler states
+    for i, scheduler in enumerate(schedulers):
+        scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin"
+        input_scheduler_file = input_dir.joinpath(scheduler_name)
+        scheduler.load_state_dict(torch.load(input_scheduler_file))
+    logger.info("All scheduler states loaded successfully")
+
+    for i, dataloader in enumerate(dataloaders):
+        sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin"
+        input_sampler_file = input_dir.joinpath(sampler_name)
+        # Only load if we have our custom sampler
+        from .data_loader import IterableDatasetShard, SeedableRandomSampler
+
+        if isinstance(dataloader.dataset, IterableDatasetShard):
+            sampler = dataloader.sampler.sampler
+
+            if isinstance(sampler, SeedableRandomSampler):
+                dataloader.sampler.sampler = torch.load(input_sampler_file)
+    logger.info("All dataloader sampler states loaded successfully")
+
+    # GradScaler state
+    if scaler is not None:
+        input_scaler_file = input_dir.joinpath(SCALER_NAME)
+        scaler.load_state_dict(torch.load(input_scaler_file))
+        logger.info("GradScaler state loaded successfully")
+
+    # Random states
+    try:
+        states = torch.load(input_dir.joinpath(f"{RNG_STATE_NAME}_{process_index}.pkl"))
+        random.setstate(states["random_state"])
+        np.random.set_state(states["numpy_random_seed"])
+        torch.set_rng_state(states["torch_manual_seed"])
+        if is_xpu_available():
+            torch.xpu.set_rng_state_all(states["torch_xpu_manual_seed"])
+        else:
+            torch.cuda.set_rng_state_all(states["torch_cuda_manual_seed"])
+        if is_torch_xla_available():
+            xm.set_rng_state(states["xm_seed"])
+        logger.info("All random states loaded successfully")
+    except Exception:
+        logger.info("Could not load random states")
+
+
+def save_custom_state(obj, path, index: int = 0, save_on_each_node: bool = False):
+    """
+    Saves the state of `obj` to `{path}/custom_checkpoint_{index}.pkl`
+    """
+    # Should this be the right way to get a qual_name type value from `obj`?
+    save_location = Path(path) / f"custom_checkpoint_{index}.pkl"
+    logger.info(f"Saving the state of {get_pretty_name(obj)} to {save_location}")
+    save(obj.state_dict(), save_location, save_on_each_node=save_on_each_node)
+
+
+def load_custom_state(obj, path, index: int = 0):
+    """
+    Loads the state of `obj` at `{path}/custom_checkpoint_{index}.pkl`
+    """
+    load_location = f"{path}/custom_checkpoint_{index}.pkl"
+    logger.info(f"Loading the state of {get_pretty_name(obj)} from {load_location}")
+    obj.load_state_dict(torch.load(load_location, map_location="cpu"))
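`save_accelerator_state` and `load_accelerator_state` above are normally reached through the public `Accelerator.save_state`/`load_state` wrappers rather than called directly; a hedged sketch of that round trip (the checkpoint directory name is arbitrary, and `model` etc. are assumed defined):

```python
# Sketch: checkpoint and resume through the public wrappers around the
# helpers defined in checkpointing.py above.
from accelerate import Accelerator

accelerator = Accelerator()
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

accelerator.save_state("checkpoint_dir")  # model, optimizer, RNG states
# ... later, resume from the same directory:
accelerator.load_state("checkpoint_dir")
```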
env-llmeval/lib/python3.10/site-packages/accelerate/launchers.py ADDED
@@ -0,0 +1,258 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import os
16
+ import sys
17
+ import tempfile
18
+
19
+ import torch
20
+
21
+ from .state import AcceleratorState, PartialState
22
+ from .utils import (
23
+ PrecisionType,
24
+ PrepareForLaunch,
25
+ are_libraries_initialized,
26
+ check_cuda_p2p_ib_support,
27
+ get_gpu_info,
28
+ is_mps_available,
29
+ patch_environment,
30
+ )
31
+
32
+
33
+ def test_launch():
34
+ "Verify a `PartialState` can be initialized."
35
+ _ = PartialState()
36
+
37
+
38
+ def notebook_launcher(
39
+ function,
40
+ args=(),
41
+ num_processes=None,
42
+ mixed_precision="no",
43
+ use_port="29500",
44
+ master_addr="127.0.0.1",
45
+ node_rank=0,
46
+ num_nodes=1,
47
+ ):
48
+ """
49
+ Launches a training function, using several processes or multiple nodes if it's possible in the current environment
50
+ (TPU with multiple cores for instance).
51
+
52
+ <Tip warning={true}>
53
+
54
+ To use this function absolutely zero calls to a CUDA device must be made in the notebook session before calling. If
55
+ any have been made, you will need to restart the notebook and make sure no cells use any CUDA capability.
56
+
57
+ Setting `ACCELERATE_DEBUG_MODE="1"` in your environment will run a test before truly launching to ensure that none
58
+ of those calls have been made.
59
+
60
+ </Tip>
61
+
62
+ Args:
63
+ function (`Callable`):
64
+ The training function to execute. If it accepts arguments, the first argument should be the index of the
65
+ process run.
66
+ args (`Tuple`):
67
+ Tuple of arguments to pass to the function (it will receive `*args`).
68
+ num_processes (`int`, *optional*):
69
+ The number of processes to use for training. Will default to 8 in Colab/Kaggle if a TPU is available, to
70
+ the number of GPUs available otherwise.
71
+ mixed_precision (`str`, *optional*, defaults to `"no"`):
72
+ If `fp16` or `bf16`, will use mixed precision training on multi-GPU.
73
+ use_port (`str`, *optional*, defaults to `"29500"`):
74
+ The port to use to communicate between processes when launching a multi-GPU training.
75
+ master_addr (`str`, *optional*, defaults to `"127.0.0.1"`):
76
+ The address to use for communication between processes.
77
+ node_rank (`int`, *optional*, defaults to 0):
78
+ The rank of the current node.
79
+ num_nodes (`int`, *optional*, defaults to 1):
80
+ The number of nodes to use for training.
81
+
82
+ Example:
83
+
84
+ ```python
85
+ # Assume this is defined in a Jupyter Notebook on an instance with two GPUs
86
+ from accelerate import notebook_launcher
87
+
88
+
89
+ def train(*args):
90
+ # Your training function here
91
+ ...
92
+
93
+
94
+ notebook_launcher(train, args=(arg1, arg2), num_processes=2, mixed_precision="fp16")
95
+ ```
96
+ """
97
+ # Are we in a google colab or a Kaggle Kernel?
98
+ in_colab = False
99
+ in_kaggle = False
100
+ if any(key.startswith("KAGGLE") for key in os.environ.keys()):
101
+ in_kaggle = True
102
+ elif "IPython" in sys.modules:
103
+ in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
104
+
105
+ try:
106
+ mixed_precision = PrecisionType(mixed_precision.lower())
107
+ except ValueError:
108
+ raise ValueError(
109
+ f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}."
110
+ )
111
+
112
+ if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
113
+ # TPU launch
114
+ import torch_xla.distributed.xla_multiprocessing as xmp
115
+
116
+ if len(AcceleratorState._shared_state) > 0:
117
+ raise ValueError(
118
+ "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
119
+ "your training function. Restart your notebook and make sure no cells initializes an "
120
+ "`Accelerator`."
121
+ )
122
+ if num_processes is None:
123
+ num_processes = 8
124
+
125
+ launcher = PrepareForLaunch(function, distributed_type="TPU")
126
+ print(f"Launching a training on {num_processes} TPU cores.")
127
+ xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
128
+ elif in_colab and get_gpu_info()[1] < 2:
129
+ # No need for a distributed launch otherwise as it's either CPU or one GPU.
130
+ if torch.cuda.is_available():
131
+ print("Launching training on one GPU.")
132
+ else:
133
+ print("Launching training on one CPU.")
134
+ function(*args)
135
+ else:
136
+ if num_processes is None:
137
+ raise ValueError(
138
+ "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
139
+ )
140
+ if node_rank >= num_nodes:
141
+ raise ValueError("The node_rank must be less than the number of nodes.")
142
+ if num_processes > 1:
143
+ # Multi-GPU launch
144
+ from torch.multiprocessing import start_processes
145
+ from torch.multiprocessing.spawn import ProcessRaisedException
146
+
147
+ if len(AcceleratorState._shared_state) > 0:
148
+ raise ValueError(
149
+ "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
150
+ "inside your training function. Restart your notebook and make sure no cells initializes an "
151
+ "`Accelerator`."
152
+ )
153
+ # Check for specific libraries known to initialize CUDA that users constantly use
154
+         problematic_imports = are_libraries_initialized("bitsandbytes")
+         if len(problematic_imports) > 0:
+             err = (
+                 "Could not start distributed process. Libraries known to initialize CUDA upon import have been "
+                 "imported already. Please keep these imports inside your training function to try and help with this:"
+             )
+             for lib_name in problematic_imports:
+                 err += f"\n\t* `{lib_name}`"
+             raise RuntimeError(err)
+
+         patched_env = dict(
+             nproc=num_processes,
+             node_rank=node_rank,
+             world_size=num_nodes * num_processes,
+             master_addr=master_addr,
+             master_port=use_port,
+             mixed_precision=mixed_precision,
+         )
+
+         # Check for CUDA P2P and IB issues
+         if not check_cuda_p2p_ib_support():
+             patched_env["nccl_p2p_disable"] = "1"
+             patched_env["nccl_ib_disable"] = "1"
+
+         # torch.distributed will expect a few environment variables to be set. We set the ones common to each
+         # process here (the other ones will be set by the launcher).
+         with patch_environment(**patched_env):
+             # First dummy launch
+             if os.environ.get("ACCELERATE_DEBUG_MODE", "false").lower() == "true":
+                 launcher = PrepareForLaunch(test_launch, distributed_type="MULTI_GPU")
+                 try:
+                     start_processes(launcher, args=(), nprocs=num_processes, start_method="fork")
+                 except ProcessRaisedException as e:
+                     err = "An issue was found when verifying a stable environment for the notebook launcher."
+                     if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
+                         raise RuntimeError(
+                             f"{err} "
+                             "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
+                             "Please review your imports and test them when running the `notebook_launcher()` to identify "
+                             "which one is problematic and causing CUDA to be initialized."
+                         ) from e
+                     else:
+                         raise RuntimeError(f"{err} The following error was raised: {e}") from e
+             # Now the actual launch
+             launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
+             print(f"Launching training on {num_processes} GPUs.")
+             try:
+                 start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
+             except ProcessRaisedException as e:
+                 if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
+                     raise RuntimeError(
+                         "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
+                         "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
+                         "Please review your imports and test them when running the `notebook_launcher()` to identify "
+                         "which one is problematic and causing CUDA to be initialized."
+                     ) from e
+                 else:
+                     raise RuntimeError(f"An issue was found when launching the training: {e}") from e
+
+     else:
+         # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
+         if is_mps_available():
+             os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
+             print("Launching training on MPS.")
+         elif torch.cuda.is_available():
+             print("Launching training on one GPU.")
+         else:
+             print("Launching training on CPU.")
+         function(*args)
+
+
+ def debug_launcher(function, args=(), num_processes=2):
+     """
+     Launches a training function using several processes on CPU for debugging purposes.
+
+     <Tip warning={true}>
+
+     This function is provided for internal testing and debugging, but it's not intended for real trainings. It will
+     only use the CPU.
+
+     </Tip>
+
+     Args:
+         function (`Callable`):
+             The training function to execute.
+         args (`Tuple`):
+             Tuple of arguments to pass to the function (it will receive `*args`).
+         num_processes (`int`, *optional*, defaults to 2):
+             The number of processes to use for training.
+     """
+     from torch.multiprocessing import start_processes
+
+     with tempfile.NamedTemporaryFile() as tmp_file:
+         # torch.distributed will expect a few environment variables to be set. We set the ones common to each
+         # process here (the other ones will be set by the launcher).
+         with patch_environment(
+             world_size=num_processes,
+             master_addr="127.0.0.1",
+             master_port="29500",
+             accelerate_mixed_precision="no",
+             accelerate_debug_rdv_file=tmp_file.name,
+             accelerate_use_cpu="yes",
+         ):
+             launcher = PrepareForLaunch(function, debug=True)
+             start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
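A minimal usage sketch of the two launchers defined above; the training function, its learning-rate argument, and the process count are illustrative placeholders:

from accelerate import debug_launcher, notebook_launcher


def training_function(learning_rate):
    # Keep CUDA-initializing imports (e.g. bitsandbytes) inside this body,
    # as the fork-related error messages above explain.
    print(f"Training with lr={learning_rate}")


# In a notebook with a fresh kernel: fork two GPU worker processes.
notebook_launcher(training_function, args=(1e-4,), num_processes=2)

# CPU-only multi-process run of the same function, for debugging.
debug_launcher(training_function, args=(1e-4,), num_processes=2)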
env-llmeval/lib/python3.10/site-packages/accelerate/local_sgd.py ADDED
@@ -0,0 +1,102 @@
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ import torch
+
+ from accelerate import Accelerator, DistributedType
+
+
+ class LocalSGD:
+     """
+     A helper class to support local SGD on top of Accelerator. It simply runs a given number of updates independently
+     on each device, and averages model weights every `local_sgd_steps` synchronization steps.
+
+     It should be used only in the multi-GPU (or multi-CPU) setup without extensions such as DeepSpeed. In particular,
+     this is a simple implementation that cannot support scenarios such as model parallelism.
+
+     Although we are not aware of the true origins of this simple approach, the idea of local SGD is quite old and goes
+     back to at least:
+
+     Zhang, J., De Sa, C., Mitliagkas, I., & Ré, C. (2016). [Parallel SGD: When does averaging help?. arXiv preprint
+     arXiv:1606.07365.](https://arxiv.org/abs/1606.07365)
+
+     We credit the term Local SGD to the following paper (but there might be earlier references we are not aware of).
+
+     Stich, Sebastian Urban. ["Local SGD Converges Fast and Communicates Little." ICLR 2019-International Conference on
+     Learning Representations. No. CONF. 2019.](https://arxiv.org/abs/1805.09767)
+     """
+
+     def __enter__(self):
+         if self.enabled:
+             self.model_sync_obj = self.model.no_sync()
+             self.model_sync_obj.__enter__()
+
+         return self
+
+     def __exit__(self, type, value, tb):
+         if self.enabled:
+             # Average all models on exit
+             self._sync_and_avg_model_params()
+             self.model_sync_obj.__exit__(type, value, tb)
+
+     def __init__(self, accelerator: Accelerator, model: torch.nn.Module, local_sgd_steps: int, enabled: bool = True):
+         """
+         Constructor.
+
+         Args:
+             model (`torch.nn.Module`):
+                 The model whose parameters we need to average.
+             accelerator (`Accelerator`):
+                 Accelerator object.
+             local_sgd_steps (`int`):
+                 A number of local SGD steps (before model parameters are synchronized).
+             enabled (`bool`):
+                 Local SGD is disabled if this parameter is set to `False`.
+         """
+         if accelerator.distributed_type not in [
+             DistributedType.NO,
+             DistributedType.MULTI_CPU,
+             DistributedType.MULTI_GPU,
+             DistributedType.MULTI_MLU,
+             DistributedType.MULTI_NPU,
+         ]:
+             raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
+         self.enabled = enabled and accelerator.distributed_type != DistributedType.NO
+         self.num_steps = 0
+         if self.enabled:
+             self.accelerator = accelerator
+             self.model = model
+             self.local_sgd_steps = local_sgd_steps
+
+     def step(self):
+         """
+         This function makes a "step" and synchronizes model parameters if necessary.
+         """
+         self.num_steps += 1
+         if not self.enabled:
+             return
+
+         if self.num_steps % self.local_sgd_steps == 0:
+             self._sync_and_avg_model_params()
+
+     def _sync_and_avg_model_params(self):
+         """
+         Synchronize + Average model parameters across all GPUs
+         """
+         self.accelerator.wait_for_everyone()
+         with self.accelerator.autocast():
+             for param in self.model.parameters():
+                 param.data = self.accelerator.reduce(param.data, reduction="mean")
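A hedged training-loop sketch for the `LocalSGD` helper above; `model`, `optimizer`, and `dataloader` are placeholders assumed to exist before being passed through `accelerator.prepare()`:

from accelerate import Accelerator
from accelerate.local_sgd import LocalSGD

accelerator = Accelerator()
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

# Gradients stay local (`model.no_sync()`), and weights are averaged every 8 steps.
with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
    for batch in dataloader:
        optimizer.zero_grad()
        loss = model(**batch).loss  # placeholder: any scalar loss works
        accelerator.backward(loss)
        optimizer.step()
        local_sgd.step()  # counts steps and triggers the parameter average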
env-llmeval/lib/python3.10/site-packages/accelerate/logging.py ADDED
@@ -0,0 +1,123 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import functools
+ import logging
+ import os
+
+ from .state import PartialState
+
+
+ class MultiProcessAdapter(logging.LoggerAdapter):
+     """
+     An adapter to assist with logging in multiprocess environments.
+
+     `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all processes
+     or only on the main one. Default is `main_process_only=True`.
+
+     Does not require an `Accelerator` object to be created first.
+     """
+
+     @staticmethod
+     def _should_log(main_process_only):
+         "Check if log should be performed"
+         state = PartialState()
+         return not main_process_only or (main_process_only and state.is_main_process)
+
+     def log(self, level, msg, *args, **kwargs):
+         """
+         Delegates logger call after checking if we should log.
+
+         Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes
+         or only on the main one. Default is `True` if not passed.
+
+         Also accepts "in_order", which if `True` makes the processes log one by one, in order. This is much easier to
+         read, but comes at the cost of sometimes needing to wait for the other processes. Default is `False` to not
+         break with the previous behavior.
+
+         `in_order` is ignored if `main_process_only` is passed.
+         """
+         if PartialState._shared_state == {}:
+             raise RuntimeError(
+                 "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
+             )
+         main_process_only = kwargs.pop("main_process_only", True)
+         in_order = kwargs.pop("in_order", False)
+
+         if self.isEnabledFor(level):
+             if self._should_log(main_process_only):
+                 msg, kwargs = self.process(msg, kwargs)
+                 self.logger.log(level, msg, *args, **kwargs)
+
+             elif in_order:
+                 state = PartialState()
+                 for i in range(state.num_processes):
+                     if i == state.process_index:
+                         msg, kwargs = self.process(msg, kwargs)
+                         self.logger.log(level, msg, *args, **kwargs)
+                     state.wait_for_everyone()
+
+     @functools.lru_cache(None)
+     def warning_once(self, *args, **kwargs):
+         """
+         This method is identical to `logger.warning()`, but will emit the warning with the same message only once.
+
+         Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the
+         cache. The assumption here is that all warning messages are unique across the code. If they aren't, then we
+         need to switch to another type of cache that includes the caller frame information in the hashing function.
+         """
+         self.warning(*args, **kwargs)
+
+
+ def get_logger(name: str, log_level: str = None):
+     """
+     Returns a `logging.Logger` for `name` that can handle multiprocessing.
+
+     If a log should be called on all processes, pass `main_process_only=False`. If a log should be called on all
+     processes and in order, also pass `in_order=True`.
+
+     Args:
+         name (`str`):
+             The name for the logger, such as `__file__`
+         log_level (`str`, *optional*):
+             The log level to use. If not passed, will default to the `ACCELERATE_LOG_LEVEL` environment variable, or
+             leave the level unset if that is also missing.
+
+     Example:
+
+     ```python
+     >>> from accelerate.logging import get_logger
+     >>> from accelerate import Accelerator
+
+     >>> logger = get_logger(__name__)
+
+     >>> accelerator = Accelerator()
+     >>> logger.info("My log", main_process_only=False)
+     >>> logger.debug("My log", main_process_only=True)
+
+     >>> logger = get_logger(__name__, log_level="DEBUG")
+     >>> logger.info("My log")
+     >>> logger.debug("My second log")
+
+     >>> array = ["a", "b", "c", "d"]
+     >>> letter_at_rank = array[accelerator.process_index]
+     >>> logger.info(letter_at_rank, in_order=True)
+     ```
+     """
+     if log_level is None:
+         log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
+     logger = logging.getLogger(name)
+     if log_level is not None:
+         logger.setLevel(log_level.upper())
+         logger.root.setLevel(log_level.upper())
+     return MultiProcessAdapter(logger, {})
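A small sketch of the `warning_once` caching above (the warning text is a placeholder); because the method is wrapped in `functools.lru_cache`, repeated calls with identical arguments emit a single record:

from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()  # initializes the state the adapter requires
logger = get_logger(__name__, log_level="WARNING")

for epoch in range(3):
    # Emitted once, not three times: identical arguments hit the cache.
    logger.warning_once("Dataset has fewer samples than the batch size.")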
env-llmeval/lib/python3.10/site-packages/accelerate/optimizer.py ADDED
@@ -0,0 +1,193 @@
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import inspect
+ import warnings
+
+ import torch
+
+ from .state import AcceleratorState, GradientState
+ from .utils import DistributedType, honor_type, is_torch_xla_available
+
+
+ if is_torch_xla_available():
+     import torch_xla.core.xla_model as xm
+
+
+ def move_to_device(state, device):
+     if isinstance(state, (list, tuple)):
+         return honor_type(state, (move_to_device(t, device) for t in state))
+     elif isinstance(state, dict):
+         return type(state)({k: move_to_device(v, device) for k, v in state.items()})
+     elif isinstance(state, torch.Tensor):
+         return state.to(device)
+     return state
+
+
+ class AcceleratedOptimizer(torch.optim.Optimizer):
+     """
+     Internal wrapper around a torch optimizer.
+
+     Conditionally will perform `step` and `zero_grad` if gradients should be synchronized when performing gradient
+     accumulation.
+
+     Args:
+         optimizer (`torch.optim.optimizer.Optimizer`):
+             The optimizer to wrap.
+         device_placement (`bool`, *optional*, defaults to `True`):
+             Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of
+             `optimizer` on the right device.
+         scaler (`torch.cuda.amp.grad_scaler.GradScaler`, *optional*):
+             The scaler to use in the step function if training with mixed precision.
+     """
+
+     def __init__(self, optimizer, device_placement=True, scaler=None):
+         self.optimizer = optimizer
+         self.scaler = scaler
+         self.accelerator_state = AcceleratorState()
+         self.gradient_state = GradientState()
+         self.device_placement = device_placement
+         self._is_overflow = False
+
+         if self.scaler is not None:
+             self._accelerate_step_called = False
+             self._optimizer_original_step_method = self.optimizer.step
+             self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step)
+
+         # Handle device placement
+         if device_placement:
+             state_dict = self.optimizer.state_dict()
+             if self.accelerator_state.distributed_type == DistributedType.XLA:
+                 xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)
+             else:
+                 state_dict = move_to_device(state_dict, self.accelerator_state.device)
+             self.optimizer.load_state_dict(state_dict)
+
+     @property
+     def state(self):
+         return self.optimizer.state
+
+     @state.setter
+     def state(self, state):
+         self.optimizer.state = state
+
+     @property
+     def param_groups(self):
+         return self.optimizer.param_groups
+
+     @param_groups.setter
+     def param_groups(self, param_groups):
+         self.optimizer.param_groups = param_groups
+
+     @property
+     def defaults(self):
+         return self.optimizer.defaults
+
+     @defaults.setter
+     def defaults(self, defaults):
+         self.optimizer.defaults = defaults
+
+     def add_param_group(self, param_group):
+         self.optimizer.add_param_group(param_group)
+
+     def load_state_dict(self, state_dict):
+         if self.accelerator_state.distributed_type == DistributedType.XLA and self.device_placement:
+             xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)
+         self.optimizer.load_state_dict(state_dict)
+
+     def state_dict(self):
+         return self.optimizer.state_dict()
+
+     def zero_grad(self, set_to_none=None):
+         if self.gradient_state.sync_gradients:
+             accept_arg = "set_to_none" in inspect.signature(self.optimizer.zero_grad).parameters
+             if accept_arg:
+                 if set_to_none is None:
+                     set_to_none = True
+                 self.optimizer.zero_grad(set_to_none=set_to_none)
+             else:
+                 if set_to_none is not None:
+                     raise ValueError("`set_to_none` for `Optimizer.zero_grad` is not supported by this optimizer.")
+                 self.optimizer.zero_grad()
+
+     def step(self, closure=None):
+         if (
+             not self.gradient_state.is_xla_gradients_synced
+             and self.accelerator_state.distributed_type == DistributedType.XLA
+         ):
+             gradients = xm._fetch_gradients(self.optimizer)
+             xm.all_reduce("sum", gradients, scale=1.0 / xm.xrt_world_size())
+             self.gradient_state.is_xla_gradients_synced = True
+         if self.gradient_state.sync_gradients:
+             if self.scaler is not None:
+                 self.optimizer.step = self._optimizer_patched_step_method
+
+                 self.scaler.step(self.optimizer, closure)
+                 self.scaler.update()
+
+                 if not self._accelerate_step_called:
+                     # If the optimizer step was skipped, gradient overflow was detected.
+                     self._is_overflow = True
+                 else:
+                     self._is_overflow = False
+                 # Reset the step method to the original one
+                 self.optimizer.step = self._optimizer_original_step_method
+                 # Reset the indicator
+                 self._accelerate_step_called = False
+             else:
+                 self.optimizer.step(closure)
+         if self.accelerator_state.distributed_type == DistributedType.XLA:
+             self.gradient_state.is_xla_gradients_synced = False
+
+     def _switch_parameters(self, parameters_map):
+         for param_group in self.optimizer.param_groups:
+             param_group["params"] = [parameters_map.get(p, p) for p in param_group["params"]]
+
+     @property
+     def is_overflow(self):
+         """Whether or not the optimizer step was done, or skipped because of gradient overflow."""
+         warnings.warn(
+             "The `is_overflow` property is deprecated and will be removed in version 1.0 of Accelerate; use "
+             "`optimizer.step_was_skipped` instead.",
+             FutureWarning,
+         )
+         return self._is_overflow
+
+     @property
+     def step_was_skipped(self):
+         """Whether or not the optimizer step was skipped."""
+         return self._is_overflow
+
+     def __getstate__(self):
+         _ignored_keys = [
+             "_accelerate_step_called",
+             "_optimizer_original_step_method",
+             "_optimizer_patched_step_method",
+         ]
+         return {k: v for k, v in self.__dict__.items() if k not in _ignored_keys}
+
+     def __setstate__(self, state):
+         self.__dict__.update(state)
+         if self.scaler is not None:
+             self._accelerate_step_called = False
+             self._optimizer_original_step_method = self.optimizer.step
+             self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step)
+
+
+ def patch_optimizer_step(accelerated_optimizer: AcceleratedOptimizer, method):
+     def patched_step(*args, **kwargs):
+         accelerated_optimizer._accelerate_step_called = True
+         return method(*args, **kwargs)
+
+     return patched_step
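A hedged sketch of how the patched `step` above surfaces skipped fp16 updates; `accelerator`, `model`, `optimizer`, `dataloader`, and `scheduler` are placeholders prepared elsewhere via `accelerator.prepare()`:

for batch in dataloader:
    optimizer.zero_grad()
    loss = model(**batch).loss  # placeholder loss
    accelerator.backward(loss)
    optimizer.step()
    if optimizer.step_was_skipped:
        # GradScaler saw inf/NaN gradients and skipped the update, so the
        # patched step method never ran; skip the LR step too, mirroring
        # what AcceleratedScheduler does.
        continue
    scheduler.step()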
env-llmeval/lib/python3.10/site-packages/accelerate/scheduler.py ADDED
@@ -0,0 +1,98 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
+
+ import warnings
+
+ from .state import AcceleratorState, GradientState
+
+
+ warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
+
+
+ class AcceleratedScheduler:
+     """
+     A wrapper around a learning rate scheduler that will only step when the optimizer(s) have a training step. Useful
+     to avoid making a scheduler step too fast when gradients overflowed and there was no training step (in mixed
+     precision training).
+
+     When performing gradient accumulation, scheduler lengths should not be adjusted by hand; Accelerate will always
+     step the scheduler to account for it.
+
+     Args:
+         scheduler (`torch.optim.lr_scheduler._LRScheduler`):
+             The scheduler to wrap.
+         optimizers (one or a list of `torch.optim.Optimizer`):
+             The optimizers used.
+         step_with_optimizer (`bool`, *optional*, defaults to `True`):
+             Whether or not the scheduler should be stepped at each optimizer step.
+         split_batches (`bool`, *optional*, defaults to `False`):
+             Whether or not the dataloaders split one batch across the different processes (so batch size is the same
+             regardless of the number of processes) or create batches on each process (so batch size is the original
+             batch size multiplied by the number of processes).
+     """
+
+     def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
+         self.scheduler = scheduler
+         self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
+         self.split_batches = split_batches
+         self.step_with_optimizer = step_with_optimizer
+         self.gradient_state = GradientState()
+
+     def step(self, *args, **kwargs):
+         if not self.step_with_optimizer:
+             # No link between scheduler and optimizer -> just step
+             self.scheduler.step(*args, **kwargs)
+             return
+
+         # Otherwise, first make sure the optimizer was stepped.
+         if not self.gradient_state.sync_gradients:
+             if self.gradient_state.adjust_scheduler:
+                 self.scheduler._step_count += 1
+             return
+
+         for opt in self.optimizers:
+             if opt.step_was_skipped:
+                 return
+         if self.split_batches:
+             # Split batches -> the training dataloader batch size is not changed so one step per training step
+             self.scheduler.step(*args, **kwargs)
+         else:
+             # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
+             # num_processes steps per training step
+             num_processes = AcceleratorState().num_processes
+             for _ in range(num_processes):
+                 # Special case when using OneCycle and `drop_last` was not used
+                 if hasattr(self.scheduler, "total_steps"):
+                     if self.scheduler._step_count <= self.scheduler.total_steps:
+                         self.scheduler.step(*args, **kwargs)
+                 else:
+                     self.scheduler.step(*args, **kwargs)
+
+     # Passthroughs
+     def get_last_lr(self):
+         return self.scheduler.get_last_lr()
+
+     def state_dict(self):
+         return self.scheduler.state_dict()
+
+     def load_state_dict(self, state_dict):
+         self.scheduler.load_state_dict(state_dict)
+
+     def get_lr(self):
+         return self.scheduler.get_lr()
+
+     def print_lr(self, *args, **kwargs):
+         return self.scheduler.print_lr(*args, **kwargs)
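A sketch of the stepping rule above, assuming a launch with 4 processes and the default `split_batches=False`: one call on the wrapper advances the inner scheduler `num_processes` times, so a schedule defined in per-process steps still finishes on time.

import torch
from accelerate import Accelerator

accelerator = Accelerator()  # assume 4 processes were launched
model = torch.nn.Linear(8, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.1, total_steps=1000)

model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
scheduler.step()  # advances OneCycleLR 4 times (capped at total_steps)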
env-llmeval/lib/python3.10/site-packages/accelerate/state.py ADDED
@@ -0,0 +1,1202 @@
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from __future__ import annotations
+
+ import logging
+ import math
+ import os
+ import threading
+ import warnings
+ from contextlib import contextmanager
+ from functools import partial
+ from typing import Any, Callable, Optional
+
+ import torch
+
+ from .utils import (
+     DistributedType,
+     DynamoBackend,
+     GradientAccumulationPlugin,
+     check_cuda_p2p_ib_support,
+     check_fp8_capability,
+     get_ccl_version,
+     get_cpu_distributed_information,
+     get_int_from_env,
+     is_ccl_available,
+     is_datasets_available,
+     is_deepspeed_available,
+     is_fp8_available,
+     is_ipex_available,
+     is_mlu_available,
+     is_mps_available,
+     is_npu_available,
+     is_torch_xla_available,
+     is_xpu_available,
+     parse_choice_from_env,
+     parse_flag_from_env,
+     set_numa_affinity,
+ )
+ from .utils.dataclasses import SageMakerDistributedType
+
+
+ if is_torch_xla_available():
+     import torch_xla.core.xla_model as xm
+
+ if is_mlu_available(check_device=False):
+     import torch_mlu  # noqa: F401
+
+ if is_npu_available(check_device=False):
+     import torch_npu  # noqa: F401
+
+ logger = logging.getLogger(__name__)
+
+
+ def is_initialized() -> bool:
+     """
+     Checks if the `AcceleratorState` has been initialized from `Accelerator`. Same as `AcceleratorState.initialized`,
+     but works as a module method.
+     """
+     return AcceleratorState._shared_state != {}
+
+
+ # No-op function returned by the `on_*` decorators on non-target processes
+ def do_nothing(*args, **kwargs):
+     return None
+
+
+ class ThreadLocalSharedDict(threading.local):
+     """
+     Descriptor that holds a dict shared between instances of a class in the same thread.
+
+     Note: Descriptors have slightly different semantics than just a dict field on its own.
+     `PartialState(...)._shared_state` and `PartialState._shared_state` (instance vs class) give the same value: the
+     underlying _storage dict. Likewise, `PartialState(...)._shared_state = {...}` overrides the _storage dict inside
+     the descriptor as you would expect. However, `PartialState._shared_state = {}` actually replaces the descriptor
+     object with a dict instead. Thus, you should modify the _storage dict in-place (e.g. `_shared_state.clear()`).
+
+     See Python documentation for an explanation of descriptors: https://docs.python.org/3/howto/descriptor.html
+
+     This is required for using PyTorch/XLA with PJRT in multithreaded mode (required for TPU v2 and v3).
+
+     See https://github.com/pytorch/xla/blob/r2.0/docs/pjrt.md#multithreading-on-tpu-v2v3
+     """
+
+     def __init__(self, thread_local: bool = False):
+         self._storage = {}
+
+     def __get__(self, obj, objtype=None):
+         return self._storage
+
+     def __set__(self, obj, value):
+         self._storage = value
+
+
+ # Prefer global shared dictionary, except when using TPU.
+ SharedDict = dict if not is_torch_xla_available() else ThreadLocalSharedDict
+
+
+ # Inspired by Alex Martelli's 'Borg'.
+ class PartialState:
+     """
+     Singleton class that has information about the current training environment and functions to help with process
+     control. Designed to be used when only process control and device execution states are needed. Does *not* need to
+     be initialized from `Accelerator`.
+
+     Args:
+         cpu (`bool`, *optional*):
+             Whether or not to force the script to execute on CPU. Will ignore any accelerators available if set to
+             `True` and force the execution on the CPU.
+         kwargs (additional keyword arguments, *optional*):
+             Additional keyword arguments to pass to the relevant `init_process_group` function. Valid `kwargs` can be
+             found in [`utils.InitProcessGroupKwargs`]. See the example section for detailed usage.
+
+     **Available attributes:**
+
+         - **device** (`torch.device`) -- The device to use.
+         - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently
+           in use.
+         - **local_process_index** (`int`) -- The index of the current process on the current server.
+         - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type
+           of mixed precision being performed (choose from 'no', 'fp16', 'bf16', or 'fp8').
+         - **num_processes** (`int`) -- The number of processes currently launched in parallel.
+         - **process_index** (`int`) -- The index of the current process.
+         - **is_last_process** (`bool`) -- Whether or not the current process is the last one.
+         - **is_main_process** (`bool`) -- Whether or not the current process is the main one.
+         - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node.
+         - **debug** (`bool`) -- Whether or not the current script is being run in debug mode.
+
+     Example:
+     ```python
+     from accelerate.utils import InitProcessGroupKwargs
+
+     # To include `InitProcessGroupKwargs`, init then call `.to_kwargs()`
+     kwargs = InitProcessGroupKwargs(...).to_kwargs()
+     state = PartialState(**kwargs)
+     ```
+     """
+
+     _shared_state = SharedDict()
+     _known_attrs = [
+         "_cpu",
+         "_mixed_precision",
+         "_shared_state",
+         "backend",
+         "debug",
+         "device",
+         "distributed_type",
+         "fork_launched",
+         "local_process_index",
+         "num_processes",
+         "process_index",
+     ]
+
+     def __init__(self, cpu: bool = False, **kwargs):
+         self.__dict__ = self._shared_state
+         if not self.initialized:
+             self._cpu = cpu
+             self.backend = None
+             env_device = os.environ.get("ACCELERATE_TORCH_DEVICE", None)
+             self.device = torch.device(env_device) if env_device is not None else None
+             self.debug = parse_flag_from_env("ACCELERATE_DEBUG_MODE")
+             use_sagemaker_dp = kwargs.pop("_use_sagemaker_dp", None)
+             dist_information = None
+             if use_sagemaker_dp is None:
+                 use_sagemaker_dp = (
+                     os.environ.get("ACCELERATE_USE_SAGEMAKER", "false") == "true"
+                     and os.environ.get("ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE") != SageMakerDistributedType.NO
+                 )
+
+             # Sets up self.backend + imports
+             backend, distributed_type = self._prepare_backend(cpu, use_sagemaker_dp, kwargs.pop("backend", None))
+             self.backend = backend
+             self.distributed_type = distributed_type
+             use_deepspeed = False
+             if not cpu and self.backend != "xla":
+                 if int(os.environ.get("LOCAL_RANK", -1)) != -1:
+                     # Deal with spawning deepspeed
+                     if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true":
+                         if not is_deepspeed_available():
+                             raise ImportError(
+                                 "DeepSpeed is not available => install it using `pip3 install deepspeed` or build it from source"
+                             )
+                         from deepspeed import comm as dist
+
+                         if is_xpu_available() and is_ccl_available():
+                             os.environ["CCL_PROCESS_LAUNCHER"] = "none"
+                             os.environ["CCL_LOCAL_SIZE"] = os.environ.get("LOCAL_WORLD_SIZE", "1")
+                             os.environ["CCL_LOCAL_RANK"] = os.environ.get("LOCAL_RANK", "0")
+
+                         if not dist.is_initialized():
+                             dist.init_distributed(dist_backend=self.backend, auto_mpi_discovery=False, **kwargs)
+                         # We need to flag `use_deepspeed` as True to override `distributed_type` later
+                         use_deepspeed = True
+                     # Deal with all other backends except XPU and CPU, which get handled specially later
+                     elif (
+                         self.distributed_type not in (DistributedType.MULTI_XPU, DistributedType.MULTI_CPU)
+                         and not torch.distributed.is_initialized()
+                     ):
+                         torch.distributed.init_process_group(backend=self.backend, **kwargs)
+                 # XPU and CPU require special env configs to be set
+                 if self.distributed_type in (DistributedType.MULTI_XPU, DistributedType.MULTI_CPU):
+                     dist_information = get_cpu_distributed_information()
+                     os.environ["RANK"] = str(dist_information.rank)
+                     os.environ["WORLD_SIZE"] = str(dist_information.world_size)
+                     os.environ["LOCAL_RANK"] = str(dist_information.local_rank)
+                     os.environ["LOCAL_WORLD_SIZE"] = str(dist_information.local_world_size)
+                     if self.backend == "ccl" and self.distributed_type == DistributedType.MULTI_XPU:
+                         os.environ["CCL_PROCESS_LAUNCHER"] = "none"
+                         os.environ["CCL_LOCAL_SIZE"] = os.environ["LOCAL_WORLD_SIZE"]
+                         os.environ["CCL_LOCAL_RANK"] = os.environ["LOCAL_RANK"]
+                     if not os.environ.get("MASTER_PORT", None):
+                         os.environ["MASTER_PORT"] = "29500"
+                     if (
+                         not os.environ.get("MASTER_ADDR", None)
+                         and dist_information.local_world_size != dist_information.world_size
+                         and self.backend != "mpi"
+                     ):
+                         raise ValueError(
+                             "Tried to launch on distributed with multinode, but `MASTER_ADDR` env was not set, "
+                             "please try exporting rank 0's hostname as `MASTER_ADDR`"
+                         )
+                     kwargs["rank"] = dist_information.rank
+                     kwargs["world_size"] = dist_information.world_size
+
+                     if (
+                         self.distributed_type == DistributedType.MULTI_CPU
+                         and get_int_from_env(["OMP_NUM_THREADS", "MKL_NUM_THREADS"], 0) == 0
+                     ):
+                         import psutil
+
+                         num_cpu_threads_per_process = int(
+                             psutil.cpu_count(logical=False) / dist_information.local_world_size
+                         )
+                         if num_cpu_threads_per_process == 0:
+                             num_cpu_threads_per_process = 1
+                         torch.set_num_threads(num_cpu_threads_per_process)
+                         warnings.warn(
+                             f"OMP_NUM_THREADS/MKL_NUM_THREADS unset, we set it at {num_cpu_threads_per_process} to improve "
+                             "out-of-the-box performance."
+                         )
+
+                     if not torch.distributed.is_initialized():
+                         torch.distributed.init_process_group(backend=self.backend, **kwargs)
+
+             # No backend == no distributed training
+             if self.backend is None:
+                 self.distributed_type = DistributedType.NO
+                 self.num_processes = 1
+                 self.process_index = 0
+                 self.local_process_index = 0
+             elif self.backend == "xla":
+                 # XLA needs device setting first for `set_replication`
+                 self.set_device()
+                 xm.set_replication(self.device, xm.get_xla_supported_devices())
+                 self.num_processes = xm.xrt_world_size()
+                 self.process_index = xm.get_ordinal()
+                 if is_torch_xla_available(check_is_tpu=True):
+                     self.local_process_index = xm.get_local_ordinal()
+                 else:
+                     self.local_process_index = int(os.environ.get("LOCAL_RANK", -1))
+             else:
+                 self.num_processes = torch.distributed.get_world_size()
+                 self.process_index = torch.distributed.get_rank()
+                 self.local_process_index = (
+                     int(os.environ.get("LOCAL_RANK", -1)) if dist_information is None else dist_information.local_rank
+                 )
+                 self.set_device()
+             # Now we can change to deepspeed
+             if use_deepspeed:
+                 self.distributed_type = DistributedType.DEEPSPEED
+
+             # Set CPU affinity if enabled
+             if parse_flag_from_env("ACCELERATE_CPU_AFFINITY", False):
+                 set_numa_affinity(self.local_process_index)
+
+             # Check for old RTX 4000's that can't use P2P or IB and are on old drivers
+             if self.device.type == "cuda" and not check_cuda_p2p_ib_support():
+                 if "NCCL_P2P_DISABLE" not in os.environ or "NCCL_IB_DISABLE" not in os.environ:
+                     raise NotImplementedError(
+                         "Using RTX 4000 series doesn't support faster communication via P2P or IB. "
+                         'Please set `NCCL_P2P_DISABLE="1"` and `NCCL_IB_DISABLE="1"` or use `accelerate launch` which '
+                         "will do this automatically."
+                     )
+         # Important: this should be the *only* code outside of the `if not self.initialized` block!
+         self.fork_launched = parse_flag_from_env("FORK_LAUNCHED", 0)
+
+     def __repr__(self) -> str:
+         return (
+             f"Distributed environment: {self.distributed_type}{(' Backend: ' + self.backend) if self.backend else ''}\n"
+             f"Num processes: {self.num_processes}\n"
+             f"Process index: {self.process_index}\n"
+             f"Local process index: {self.local_process_index}\n"
+             f"Device: {self.device}\n"
+         )
+
+     @staticmethod
+     def _reset_state():
+         "Resets `_shared_state`, is used internally and should not be called"
+         PartialState._shared_state.clear()
+
+     @property
+     def initialized(self) -> bool:
+         "Returns whether the `PartialState` has been initialized"
+         return self._shared_state != {}
+
+     @property
+     def use_distributed(self):
+         """
+         Whether the Accelerator is configured for distributed training
+         """
+         return self.distributed_type != DistributedType.NO and self.num_processes > 1
+
+     @property
+     def is_last_process(self) -> bool:
+         "Returns whether the current process is the last one"
+         return self.process_index == self.num_processes - 1
+
+     @property
+     def is_main_process(self) -> bool:
+         "Returns whether the current process is the main process"
+         return (
+             self.process_index == 0 if self.distributed_type != DistributedType.MEGATRON_LM else self.is_last_process
+         )
+
+     @property
+     def is_local_main_process(self) -> bool:
+         "Returns whether the current process is the main process on the local node"
+         return (
+             self.local_process_index == 0
+             if self.distributed_type != DistributedType.MEGATRON_LM
+             else self.is_last_process
+         )
+
+     def wait_for_everyone(self):
+         """
+         Will stop the execution of the current process until every other process has reached that point (so this does
+         nothing when the script is only run in one process). Useful to do before saving a model.
+
+         Example:
+
+         ```python
+         >>> # Assuming two GPU processes
+         >>> import time
+         >>> from accelerate.state import PartialState
+
+         >>> state = PartialState()
+         >>> if state.is_main_process:
+         ...     time.sleep(2)
+         ... else:
+         ...     print("I'm waiting for the main process to finish its sleep...")
+         >>> state.wait_for_everyone()
+         >>> # Should print on every process at the same time
+         >>> print("Everyone is here")
+         ```
+         """
+         if self.distributed_type in (
+             DistributedType.MULTI_GPU,
+             DistributedType.MULTI_MLU,
+             DistributedType.MULTI_NPU,
+             DistributedType.MULTI_XPU,
+             DistributedType.MULTI_CPU,
+             DistributedType.DEEPSPEED,
+             DistributedType.FSDP,
+         ):
+             torch.distributed.barrier()
+         elif self.distributed_type == DistributedType.XLA:
+             xm.rendezvous("accelerate.utils.wait_for_everyone")
+
+     def _goes_first(self, is_main: bool):
+         if not is_main:
+             self.wait_for_everyone()
+
+         yield
+
+         if is_main:
+             self.wait_for_everyone()
+
+     @contextmanager
+     def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False):
+         """
+         Splits `inputs` between `self.num_processes` quickly, and the result can then be used on that process. Useful
+         when doing distributed inference, such as with different prompts.
+
+         Note that when using a `dict`, all keys need to have the same number of elements.
+
+         Args:
+             inputs (`list`, `tuple`, `torch.Tensor`, `dict` of `list`/`tuple`/`torch.Tensor`, or `datasets.Dataset`):
+                 The input to split between processes.
+             apply_padding (`bool`, `optional`, defaults to `False`):
+                 Whether to apply padding by repeating the last element of the input so that all processes have the same
+                 number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing
+                 in fewer inputs than there are processes. If so, just remember to drop the padded elements afterwards.
+
+         Example:
+
+         ```python
+         # Assume there are two processes
+         from accelerate import PartialState
+
+         state = PartialState()
+         with state.split_between_processes(["A", "B", "C"]) as inputs:
+             print(inputs)
+         # Process 0
+         ["A", "B"]
+         # Process 1
+         ["C"]
+
+         with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs:
+             print(inputs)
+         # Process 0
+         ["A", "B"]
+         # Process 1
+         ["C", "C"]
+         ```
+         """
+         if self.num_processes == 1:
+             yield inputs
+             return
+         length = len(inputs)
+         # Nested dictionary of any types
+         if isinstance(inputs, dict):
+             length = len(inputs[list(inputs.keys())[0]])
+             if not all(len(v) == length for v in inputs.values()):
+                 raise ValueError("All values in the dictionary must have the same length")
+         num_samples_per_process = math.ceil(length / self.num_processes)
+         start_index = self.process_index * num_samples_per_process
+         end_index = start_index + num_samples_per_process
+         if (len(inputs) % self.num_processes != 0) and (self.process_index == self.num_processes - 1):
+             end_index = length
+
+         def _split_values(inputs, start_index, end_index):
+             if isinstance(inputs, (list, tuple, torch.Tensor)):
+                 if start_index >= len(inputs):
+                     result = inputs[-1:]
+                 else:
+                     result = inputs[start_index:end_index]
+                 if apply_padding:
+                     if isinstance(result, torch.Tensor):
+                         from accelerate.utils import pad_across_processes, send_to_device
+
+                         # The tensor needs to be on the device before we can pad it
+                         tensorized_result = send_to_device(result, self.device)
+                         result = pad_across_processes(tensorized_result, pad_index=inputs[-1])
+                     else:
+                         result += [result[-1]] * (num_samples_per_process - len(result))
+                 return result
+             elif isinstance(inputs, dict):
+                 for key in inputs.keys():
+                     inputs[key] = _split_values(inputs[key], start_index, end_index)
+                 return inputs
+             else:
+                 if is_datasets_available():
+                     from datasets import Dataset
+
+                     if isinstance(inputs, Dataset):
+                         if start_index >= len(inputs):
+                             start_index = len(inputs) - 1
+                         if end_index > len(inputs):
+                             end_index = len(inputs)
+                         result_idcs = list(range(start_index, end_index))
+                         if apply_padding:
+                             result_idcs += [end_index - 1] * (num_samples_per_process - len(result_idcs))
+                         return inputs.select(result_idcs)
+                 return inputs
+
+         yield _split_values(inputs, start_index, end_index)
+
+     @contextmanager
+     def main_process_first(self):
+         """
+         Lets the main process go first inside a with block.
+
+         The other processes will enter the with block after the main process exits.
+
+         Example:
+
+         ```python
+         >>> from accelerate import Accelerator
+
+         >>> accelerator = Accelerator()
+         >>> with accelerator.main_process_first():
+         ...     # This will be printed first by process 0 then in a seemingly
+         ...     # random order by the other processes.
+         ...     print(f"This will be printed by process {accelerator.process_index}")
+         ```
+         """
+         yield from self._goes_first(self.is_main_process)
+
+     @contextmanager
+     def local_main_process_first(self):
+         """
+         Lets the local main process go first inside a with block.
+
+         The other processes will enter the with block after the local main process exits.
+
+         Example:
+
+         ```python
+         >>> from accelerate.state import PartialState
+
+         >>> state = PartialState()
+         >>> with state.local_main_process_first():
+         ...     # This will be printed first by local process 0 then in a seemingly
+         ...     # random order by the other processes.
+         ...     print(f"This will be printed by process {state.local_process_index}")
+         ```
+         """
+         yield from self._goes_first(self.is_local_main_process)
+
+     def on_main_process(self, function: Callable[..., Any] = None):
+         """
+         Decorator that only runs the decorated function on the main process.
+
+         Args:
+             function (`Callable`): The function to decorate.
+
+         Example:
+
+         ```python
+         >>> from accelerate.state import PartialState
+
+         >>> state = PartialState()
+
+
+         >>> @state.on_main_process
+         ... def print_something():
+         ...     print("This will be printed by process 0 only.")
+
+
+         >>> print_something()
+         "This will be printed by process 0 only"
+         ```
+         """
+         if not self.initialized:
+             raise ValueError("The `PartialState` or `Accelerator` must be initialized before calling this function.")
+         if self.is_main_process or not self.use_distributed:
+             return function
+         return do_nothing
+
+     def on_local_main_process(self, function: Callable[..., Any] = None):
+         """
+         Decorator that only runs the decorated function on the local main process.
+
+         Args:
+             function (`Callable`): The function to decorate.
+
+         Example:
+         ```python
+         # Assume we have 2 servers with 4 processes each.
+         from accelerate.state import PartialState
+
+         state = PartialState()
+
+
+         @state.on_local_main_process
+         def print_something():
+             print("This will be printed by process 0 only on each server.")
+
+
+         print_something()
+         # On server 1:
+         "This will be printed by process 0 only"
+         # On server 2:
+         "This will be printed by process 0 only"
+         ```
+         """
+         if self.is_local_main_process or not self.use_distributed:
+             return function
+         return do_nothing
+
+     def on_last_process(self, function: Callable[..., Any]):
+         """
+         Decorator that only runs the decorated function on the last process.
+
+         Args:
+             function (`Callable`): The function to decorate.
+
+         Example:
+         ```python
+         # Assume we have 4 processes.
+         from accelerate.state import PartialState
+
+         state = PartialState()
+
+
+         @state.on_last_process
+         def print_something():
+             print(f"Printed on process {state.process_index}")
+
+
+         print_something()
+         "Printed on process 3"
+         ```
+         """
+         if self.is_last_process or not self.use_distributed:
+             return function
+         return do_nothing
+
+     def on_process(self, function: Callable[..., Any] = None, process_index: int = None):
+         """
+         Decorator that only runs the decorated function on the process with the given index.
+
+         Args:
+             function (`Callable`, `optional`):
+                 The function to decorate.
+             process_index (`int`, `optional`):
+                 The index of the process on which to run the function.
+
+         Example:
+         ```python
+         # Assume we have 4 processes.
+         from accelerate.state import PartialState
+
+         state = PartialState()
+
+
+         @state.on_process(process_index=2)
+         def print_something():
+             print(f"Printed on process {state.process_index}")
+
+
+         print_something()
+         "Printed on process 2"
+         ```
+         """
+         if function is None:
+             return partial(self.on_process, process_index=process_index)
+         if (self.process_index == process_index) or (not self.use_distributed):
+             return function
+         return do_nothing
+
+     def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None):
+         """
+         Decorator that only runs the decorated function on the process with the given index on the current node.
+
+         Args:
+             function (`Callable`, *optional*):
+                 The function to decorate.
+             local_process_index (`int`, *optional*):
+                 The index of the local process on which to run the function.
+
+         Example:
+         ```python
+         # Assume we have 2 servers with 4 processes each.
+         from accelerate import Accelerator
+
+         accelerator = Accelerator()
+
+
+         @accelerator.on_local_process(local_process_index=2)
+         def print_something():
+             print(f"Printed on process {accelerator.local_process_index}")
+
+
+         print_something()
+         # On server 1:
+         "Printed on process 2"
+         # On server 2:
+         "Printed on process 2"
+         ```
+         """
+         if function is None:
+             return partial(self.on_local_process, local_process_index=local_process_index)
+         if (self.local_process_index == local_process_index) or (not self.use_distributed):
+             return function
+         return do_nothing
+
+     def print(self, *args, **kwargs):
+         if self.is_local_main_process:
+             print(*args, **kwargs)
+
+     @property
+     def default_device(self) -> torch.device:
+         """
+         Returns the default device which is:
+         - MPS if `torch.backends.mps.is_available()` and `torch.backends.mps.is_built()` both return True.
+         - MLU if `is_mlu_available()`
+         - CUDA if `torch.cuda.is_available()`
+         - XPU if `is_xpu_available()`
+         - NPU if `is_npu_available()`
+         - CPU otherwise
+         """
+         if is_mps_available():
+             os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
+             return torch.device("mps")
+         elif is_mlu_available():
+             return torch.device("mlu")
+         elif torch.cuda.is_available():
+             return torch.device("cuda")
+         elif is_xpu_available():
+             return torch.device("xpu:0")
+         elif is_npu_available():
+             return torch.device("npu")
+         else:
+             return torch.device("cpu")
+
+     def _prepare_backend(
+         self, cpu: bool = False, sagemaker_dp=False, backend: str = None
+     ) -> tuple[str, DistributedType]:
+         "Prepares any imports needed before initializing the distributed backend and sets `self.backend` properly"
+         distributed_type = None
+         if sagemaker_dp:
+             import smdistributed.dataparallel.torch.torch_smddp  # noqa
+
+             backend = "smddp"
+             distributed_type = DistributedType.MULTI_GPU
+         elif is_torch_xla_available():
+             backend = "xla"
+             distributed_type = DistributedType.XLA
+         elif int(os.environ.get("LOCAL_RANK", -1)) != -1:
+             if not cpu:
+                 if is_mlu_available():
+                     backend = "cncl"
+                     distributed_type = DistributedType.MULTI_MLU
+                 elif torch.cuda.is_available():
+                     if backend is None:
+                         backend = "nccl"
+                     distributed_type = DistributedType.MULTI_GPU
+                 elif is_npu_available():
+                     backend = "hccl"
+                     distributed_type = DistributedType.MULTI_NPU
+         if backend is None and (
+             int(os.environ.get("LOCAL_RANK", -1)) != -1
+             or get_int_from_env(["PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE", "WORLD_SIZE"], 1) > 1
+         ):
+             if not cpu and is_xpu_available():
+                 distributed_type = DistributedType.MULTI_XPU
+             else:
+                 distributed_type = DistributedType.MULTI_CPU
+             if is_ccl_available() and (
+                 get_int_from_env(["CCL_WORKER_COUNT"], 0) > 0 or distributed_type == DistributedType.MULTI_XPU
+             ):
+                 if get_ccl_version() >= "1.12":
+                     import oneccl_bindings_for_pytorch  # noqa: F401
+                 else:
+                     import torch_ccl  # noqa: F401
+
+                 backend = "ccl"
+             elif torch.distributed.is_mpi_available():
+                 backend = "mpi"
+             else:
+                 backend = "gloo"
+         if distributed_type is None:
+             distributed_type = DistributedType.NO
+         return backend, distributed_type
+
+     def set_device(self):
+         """
+         Sets the device in `self.device` to the current distributed environment.
+         """
+         if self.device is not None:
+             return
+         if self.distributed_type == DistributedType.NO:
+             self.device = torch.device("cpu") if self._cpu else self.default_device
+             return
+         device = str(self.distributed_type).split(".")[-1].replace("MULTI_", "").lower()
+         if device not in ("cpu", "gpu", "mlu", "npu", "xpu", "xla"):
+             raise ValueError(
+                 f"Can't set device for {self.distributed_type} ({device}), verify we should be calling `set_device()` for it!"
+             )
+         if device == "xla":
+             self.device = xm.xla_device()
+         else:
+             if device == "gpu":
+                 device = "cuda"
+             self.device = torch.device(device, self.local_process_index)
+         if self.device is not None:
+             if device == "xpu":
+                 torch.xpu.set_device(self.device)
+             elif device == "mlu":
+                 torch.mlu.set_device(self.device)
+             elif device == "npu":
+                 torch.npu.set_device(self.device)
+             elif device == "cuda":
+                 torch.cuda.set_device(self.device)
+
+     def __getattr__(self, name: str):
+         # By this point we know that no attributes of `self` contain `name`,
+         # so we just modify the error message
+         if name in self._known_attrs:
+             raise AttributeError(
+                 f"`PartialState` object has no attribute `{name}`. "
+                 "This happens if `PartialState._reset_state()` was called and "
+                 "an `Accelerator` or `PartialState` was not reinitialized."
+             )
+         # Raise a typical AttributeError
+         raise AttributeError(f"'PartialState' object has no attribute '{name}'")
+
+
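A brief, hedged sketch of `PartialState`-based process control as defined above (the guarded work is a placeholder):

from accelerate import PartialState

state = PartialState()
print(f"rank {state.process_index}/{state.num_processes} on {state.device}")

if state.is_main_process:
    pass  # e.g. download or preprocess a dataset exactly once
state.wait_for_everyone()  # other ranks block until the main process arrives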
+ class AcceleratorState:
+     """
+     Singleton class that has information about the current training environment.
+
+     **Available attributes:**
+
+         - **device** (`torch.device`) -- The device to use.
+         - **distributed_type** ([`~accelerate.state.DistributedType`]) -- The type of distributed environment currently
+           in use.
+         - **initialized** (`bool`) -- Whether or not the `AcceleratorState` has been initialized from `Accelerator`.
+         - **local_process_index** (`int`) -- The index of the current process on the current server.
+         - **mixed_precision** (`str`) -- Whether or not the current script will use mixed precision, and if so the type
+           of mixed precision being performed (choose from 'no', 'fp16', 'bf16', or 'fp8').
+         - **num_processes** (`int`) -- The number of processes currently launched in parallel.
+         - **process_index** (`int`) -- The index of the current process.
+         - **is_last_process** (`bool`) -- Whether or not the current process is the last one.
+         - **is_main_process** (`bool`) -- Whether or not the current process is the main one.
+         - **is_local_main_process** (`bool`) -- Whether or not the current process is the main one on the local node.
+         - **debug** (`bool`) -- Whether or not the current script is being run in debug mode.
+     """
+
+     _shared_state = SharedDict()
+     _known_attrs = PartialState._known_attrs + [
+         "deepspeed_plugin",
+         "use_ipex",
+         "fsdp_plugin",
+         "megatron_lm_plugin",
+         "dynamo_plugin",
+     ]
+
+     def __init__(
+         self,
+         mixed_precision: str = None,
+         cpu: bool = False,
+         dynamo_plugin=None,
+         deepspeed_plugin=None,
+         fsdp_plugin=None,
+         megatron_lm_plugin=None,
+         _from_accelerator: bool = False,
+         **kwargs,
+     ):
+         self.__dict__ = self._shared_state
+         if parse_flag_from_env("ACCELERATE_USE_CPU"):
+             cpu = True
+         if PartialState._shared_state == {}:
+             PartialState(cpu, **kwargs)
+         self.__dict__.update(PartialState._shared_state)
+         self._check_initialized(mixed_precision, cpu)
+         if not self.initialized:
+             self.deepspeed_plugin = None
+             self.use_ipex = None
+             mixed_precision = (
+                 parse_choice_from_env("ACCELERATE_MIXED_PRECISION", "no")
+                 if mixed_precision is None
+                 else mixed_precision.lower()
+             )
+             if mixed_precision == "fp8":
+                 if not is_fp8_available():
+                     raise ValueError(
+                         "Using `fp8` precision requires `transformer_engine` or `MS-AMP` to be installed."
+                     )
+                 elif not check_fp8_capability():
+                     logger.warning(
+                         f"The current device has compute capability of {torch.cuda.get_device_capability()} which is "
+                         "insufficient for FP8 mixed precision training (requires a GPU Hopper/Ada Lovelace "
+                         "or higher, compute capability of 8.9 or higher). Will use FP16 instead."
+                     )
+                     mixed_precision = "fp16"
+
+             self.dynamo_plugin = dynamo_plugin
+             if not _from_accelerator:
+                 raise ValueError(
+                     "Please make sure to properly initialize your accelerator via `accelerator = Accelerator()` "
+                     "before using any functionality from the `accelerate` library."
+                 )
+             # deepspeed handles mixed_precision using deepspeed_config
+             self._mixed_precision = "no" if self.distributed_type == DistributedType.DEEPSPEED else mixed_precision
+             if self.distributed_type == DistributedType.XLA and is_torch_xla_available(check_is_tpu=True):
+                 if mixed_precision == "bf16":
+                     if os.environ.get("ACCELERATE_DOWNCAST_BF16"):
+                         os.environ["XLA_USE_BF16"] = str(0)
+                         os.environ["XLA_DOWNCAST_BF16"] = str(1)
+                         self.downcast_bfloat = True
+                     else:
+                         os.environ["XLA_USE_BF16"] = str(1)
+                         os.environ["XLA_DOWNCAST_BF16"] = str(0)
+                         self.downcast_bfloat = False
+             elif os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" and not cpu:
+                 self.deepspeed_plugin = deepspeed_plugin
+             elif self.distributed_type in [
+                 DistributedType.MULTI_GPU,
+                 DistributedType.MULTI_MLU,
+                 DistributedType.MULTI_NPU,
+                 DistributedType.MULTI_XPU,
+             ]:
+                 if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true":
+                     self.distributed_type = DistributedType.FSDP
+                     if self._mixed_precision != "no":
+                         fsdp_plugin.set_mixed_precision(self._mixed_precision)
+                     self.fsdp_plugin = fsdp_plugin
+                 if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true" and self.distributed_type not in [
+                     DistributedType.MULTI_NPU,
+                     DistributedType.MULTI_XPU,
+                 ]:
+                     self.distributed_type = DistributedType.MEGATRON_LM
+                     megatron_lm_plugin.set_mixed_precision(self._mixed_precision)
+                     self.megatron_lm_plugin = megatron_lm_plugin
+             elif self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]:
+                 if is_ipex_available():
+                     # check if user disables it explicitly
+                     self.use_ipex = parse_flag_from_env("ACCELERATE_USE_IPEX", default=True)
+                 else:
+                     self.use_ipex = False
+             if (
+                 self.dynamo_plugin.backend != DynamoBackend.NO
+                 and self._mixed_precision == "no"
+                 and self.device.type == "cuda"
+             ):
+                 torch.backends.cuda.matmul.allow_tf32 = True
+             PartialState._shared_state["distributed_type"] = self.distributed_type
+
+     @property
+     def initialized(self) -> bool:
+         return self._shared_state != PartialState._shared_state
+
+     def __repr__(self):
+         repr = PartialState().__repr__() + f"\nMixed precision type: {self.mixed_precision}\n"
+         if self.distributed_type == DistributedType.DEEPSPEED:
+             repr += f"ds_config: {self.deepspeed_plugin.deepspeed_config}\n"
+         return repr
+
+     def _check_initialized(self, mixed_precision=None, cpu=None):
+         "Checks if a modification is being attempted after the `AcceleratorState` has already been initialized"
934
+ if self.initialized:
935
+ err = "AcceleratorState has already been initialized and cannot be changed, restart your runtime completely and pass `{flag}` to `Accelerator()`."
936
+ if cpu and self.device.type != "cpu":
937
+ raise ValueError(err.format(flag="cpu=True"))
938
+ if (
939
+ mixed_precision is not None
940
+ and mixed_precision != self._mixed_precision
941
+ and self.distributed_type != DistributedType.DEEPSPEED
942
+ ):
943
+ raise ValueError(err.format(flag=f"mixed_precision='{mixed_precision}'"))
944
+
945
+ # For backward compatibility
946
+ @property
947
+ def use_fp16(self):
948
+ warnings.warn(
949
+ "The `use_fp16` property is deprecated and will be removed in version 1.0 of Accelerate use "
950
+ "`AcceleratorState.mixed_precision == 'fp16'` instead.",
951
+ FutureWarning,
952
+ )
953
+ return self._mixed_precision != "no"
954
+
955
+ @property
956
+ def mixed_precision(self):
957
+ if self.distributed_type == DistributedType.DEEPSPEED:
958
+ config = self.deepspeed_plugin.deepspeed_config
959
+ if config.get("fp16", {}).get("enabled", False):
960
+ mixed_precision = "fp16"
961
+ elif config.get("bf16", {}).get("enabled", False):
962
+ mixed_precision = "bf16"
963
+ else:
964
+ mixed_precision = "no"
965
+ else:
966
+ mixed_precision = self._mixed_precision
967
+ return mixed_precision
968
+
969
+ @staticmethod
970
+ def _reset_state(reset_partial_state: bool = False):
971
+ "Resets `_shared_state`, is used internally and should not be called"
972
+ AcceleratorState._shared_state.clear()
973
+ if reset_partial_state:
974
+ PartialState._reset_state()
975
+
976
+ @property
977
+ def use_distributed(self):
978
+ """
979
+ Whether the Accelerator is configured for distributed training
980
+ """
981
+ return PartialState().use_distributed
982
+
983
+ @property
984
+ def is_last_process(self) -> bool:
985
+ "Returns whether the current process is the last one"
986
+ return PartialState().is_last_process
987
+
988
+ @property
989
+ def is_main_process(self) -> bool:
990
+ "Returns whether the current process is the main process"
991
+ return PartialState().is_main_process
992
+
993
+ @property
994
+ def is_local_main_process(self) -> bool:
995
+ "Returns whether the current process is the main process on the local node"
996
+ return PartialState().is_local_main_process
997
+
998
+ def wait_for_everyone(self):
999
+ PartialState().wait_for_everyone()
1000
+
1001
+ @contextmanager
1002
+ def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False):
1003
+ """
1004
+ Splits `input` between `self.num_processes` quickly and can be then used on that process. Useful when doing
1005
+ distributed inference, such as with different prompts.
1006
+
1007
+ Note that when using a `dict`, all keys need to have the same number of elements.
1008
+
1009
+ Args:
1010
+ inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`):
1011
+ The input to split between processes.
1012
+ apply_padding (`bool`, `optional`, defaults to `False`):
1013
+ Whether to apply padding by repeating the last element of the input so that all processes have the same
1014
+ number of elements. Useful when trying to perform actions such as `gather()` on the outputs or passing
1015
+ in less inputs than there are processes. If so, just remember to drop the padded elements afterwards.
1016
+
1017
+
1018
+ Example:
1019
+
1020
+ ```python
1021
+ # Assume there are two processes
1022
+ from accelerate.state import AcceleratorState
1023
+
1024
+ state = AcceleratorState()
1025
+ with state.split_between_processes(["A", "B", "C"]) as inputs:
1026
+ print(inputs)
1027
+ # Process 0
1028
+ ["A", "B"]
1029
+ # Process 1
1030
+ ["C"]
1031
+
1032
+ with state.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs:
1033
+ print(inputs)
1034
+ # Process 0
1035
+ ["A", "B"]
1036
+ # Process 1
1037
+ ["C", "C"]
1038
+ ```
1039
+ """
1040
+ with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs:
1041
+ yield inputs
1042
+
1043
+ @contextmanager
1044
+ def main_process_first(self):
1045
+ """
1046
+ Lets the main process go first inside a with block.
1047
+
1048
+ The other processes will enter the with block after the main process exits.
1049
+ """
1050
+ with PartialState().main_process_first():
1051
+ yield
1052
+
1053
+ @contextmanager
1054
+ def local_main_process_first(self):
1055
+ """
1056
+ Lets the local main process go inside a with block.
1057
+
1058
+ The other processes will enter the with block after the main process exits.
1059
+ """
1060
+ with PartialState().local_main_process_first():
1061
+ yield
1062
+
1063
+ def print(self, *args, **kwargs):
1064
+ PartialState().print(*args, **kwargs)
1065
+
1066
+ def __getattr__(self, name: str):
1067
+ # By this point we know that no attributes of `self` contain `name`,
1068
+ # so we just modify the error message
1069
+ if name in self._known_attrs:
1070
+ raise AttributeError(
1071
+ f"`AcceleratorState` object has no attribute `{name}`. "
1072
+ "This happens if `AcceleratorState._reset_state()` was called and "
1073
+ "an `Accelerator` or `PartialState` was not reinitialized."
1074
+ )
1075
+ # Raise a typical AttributeError
1076
+ raise AttributeError(f"'AcceleratorState' object has no attribute '{name}'")
1077
+
1078
+
1079
+ class GradientState:
1080
+ """
1081
+ Singleton class that has information related to gradient synchronization for gradient accumulation
1082
+
1083
+ **Available attributes:**
1084
+
1085
+ - **end_of_dataloader** (`bool`) -- Whether we have reached the end the current dataloader
1086
+ - **remainder** (`int`) -- The number of extra samples that were added from padding the dataloader
1087
+ - **sync_gradients** (`bool`) -- Whether the gradients should be synced across all devices
1088
+ - **active_dataloader** (`Optional[DataLoader]`) -- The dataloader that is currently being iterated over
1089
+ - **dataloader_references** (`List[Optional[DataLoader]]`) -- A list of references to the dataloaders that are
1090
+ being iterated over
1091
+ - **num_steps** (`int`) -- The number of steps to accumulate over
1092
+ - **adjust_scheduler** (`bool`) -- Whether the scheduler should be adjusted to account for the gradient
1093
+ accumulation
1094
+ - **sync_with_dataloader** (`bool`) -- Whether the gradients should be synced at the end of the dataloader
1095
+ iteration and the number of total steps reset
1096
+ - **is_xla_gradients_synced** (`bool`) -- Whether the XLA gradients have been synchronized. It is initialized
1097
+ as false. Once gradients have been reduced before the optimizer step, this flag is set to true. Subsequently,
1098
+ after each step, the flag is reset to false. FSDP will always synchronize the gradients, hence
1099
+ is_xla_gradients_synced is always true.
1100
+ """
1101
+
1102
+ _shared_state = SharedDict()
1103
+
1104
+ def __init__(self, gradient_accumulation_plugin: Optional[GradientAccumulationPlugin] = None):
1105
+ self.__dict__ = self._shared_state
1106
+ if not self.initialized:
1107
+ self.sync_gradients = True
1108
+ self.active_dataloader = None
1109
+ self.dataloader_references = [None]
1110
+ self.plugin_kwargs = (
1111
+ gradient_accumulation_plugin.to_kwargs() if gradient_accumulation_plugin is not None else {}
1112
+ )
1113
+ self._is_xla_gradients_synced = False
1114
+
1115
+ # Plugin args are different and can be updated
1116
+ if gradient_accumulation_plugin is not None and self.plugin_kwargs != gradient_accumulation_plugin.to_kwargs():
1117
+ self.plugin_kwargs = gradient_accumulation_plugin.to_kwargs()
1118
+
1119
+ @property
1120
+ def num_steps(self) -> int:
1121
+ "Returns the number of steps to accumulate over"
1122
+ return self.plugin_kwargs.get("num_steps", 1)
1123
+
1124
+ @property
1125
+ def adjust_scheduler(self) -> bool:
1126
+ "Returns whether the scheduler should be adjusted"
1127
+ return self.plugin_kwargs.get("adjust_scheduler", False)
1128
+
1129
+ @property
1130
+ def sync_with_dataloader(self) -> bool:
1131
+ "Returns whether the gradients should be synced at the end of the dataloader iteration and the number of total steps reset"
1132
+ return self.plugin_kwargs.get("sync_with_dataloader", True)
1133
+
1134
+ @property
1135
+ def initialized(self) -> bool:
1136
+ "Returns whether the `GradientState` has been initialized"
1137
+ return GradientState._shared_state != {}
1138
+
1139
+ @property
1140
+ def end_of_dataloader(self) -> bool:
1141
+ "Returns whether we have reached the end of the current dataloader"
1142
+ if not self.in_dataloader:
1143
+ return False
1144
+ return self.active_dataloader.end_of_dataloader
1145
+
1146
+ @property
1147
+ def remainder(self) -> int:
1148
+ "Returns the number of extra samples that were added from padding the dataloader"
1149
+ if not self.in_dataloader:
1150
+ return -1
1151
+ return self.active_dataloader.remainder
1152
+
1153
+ def __repr__(self):
1154
+ return (
1155
+ f"Sync Gradients: {self.sync_gradients}\n"
1156
+ f"At end of current dataloader: {self.end_of_dataloader}\n"
1157
+ f"Extra samples added: {self.remainder}\n"
1158
+ f"Gradient accumulation plugin: {self.plugin_kwargs}\n"
1159
+ )
1160
+
1161
+ @property
1162
+ def is_xla_gradients_synced(self):
1163
+ "Returns the value of is_xla_gradients_synced. FSDP will always synchronize the gradients, hence is_xla_gradients_synced is always true."
1164
+ if parse_flag_from_env("ACCELERATE_USE_FSDP", default=False):
1165
+ return True
1166
+ return self._is_xla_gradients_synced
1167
+
1168
+ @is_xla_gradients_synced.setter
1169
+ def is_xla_gradients_synced(self, is_synced):
1170
+ "Set the _is_xla_gradients_synced attribute."
1171
+ self._is_xla_gradients_synced = is_synced
1172
+
1173
+ def _set_sync_gradients(self, sync_gradients):
1174
+ "Private function that sets whether gradients should be synchronized. Users should not have to call this."
1175
+ self.sync_gradients = sync_gradients
1176
+ # Allow grad-sync to automatically work on TPUs
1177
+ if (
1178
+ self.sync_gradients
1179
+ and is_torch_xla_available(check_is_tpu=True)
1180
+ and PartialState().distributed_type == DistributedType.XLA
1181
+ ):
1182
+ xm.mark_step()
1183
+
1184
+ def _add_dataloader(self, dataloader):
1185
+ "Private function that adds a dataloader to `self.dataloader_references` and sets `in_dataloader` to `True`. Users should not have to call this."
1186
+ self.active_dataloader = dataloader
1187
+ self.dataloader_references.append(self.active_dataloader)
1188
+
1189
+ def _remove_dataloader(self, dataloader):
1190
+ "Private function that removes a dataloader from `self.dataloader_references` and sets `in_dataloader` to `False` if there are no more dataloaders. Users should not have to call this."
1191
+ self.dataloader_references.remove(dataloader)
1192
+ self.active_dataloader = self.dataloader_references[-1]
1193
+
1194
+ @property
1195
+ def in_dataloader(self) -> bool:
1196
+ "Returns whether the current process is in a dataloader"
1197
+ return self.active_dataloader is not None
1198
+
1199
+ @staticmethod
1200
+ def _reset_state():
1201
+ "Resets `_shared_state`, is used internally and should not be called"
1202
+ GradientState._shared_state.clear()
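All three classes in this file use the same shared-`__dict__` (borg) singleton pattern: every instance points at one module-level `SharedDict`, so state initialized once (normally by `Accelerator`) is visible from any later instantiation. A minimal sketch of what that implies in practice; the asserts are illustrative usage, not part of the file:

```python
from accelerate import Accelerator
from accelerate.state import AcceleratorState, GradientState

accelerator = Accelerator()  # populates the shared state exactly once

state = AcceleratorState()  # a second "instance" is a view onto the same dict
assert state.device == accelerator.device
assert state.mixed_precision == accelerator.mixed_precision

grad_state = GradientState()  # same pattern; `sync_gradients` starts as True
assert grad_state.sync_gradients
assert grad_state.num_steps == 1  # no GradientAccumulationPlugin was configured
```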
env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/__init__.py ADDED
@@ -0,0 +1,50 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from .testing import (
+    DEFAULT_LAUNCH_COMMAND,
+    are_the_same_tensors,
+    assert_exception,
+    device_count,
+    execute_subprocess_async,
+    get_launch_command,
+    memory_allocated_func,
+    path_in_accelerate_package,
+    require_bnb,
+    require_cpu,
+    require_cuda,
+    require_huggingface_suite,
+    require_mlu,
+    require_mps,
+    require_multi_device,
+    require_multi_gpu,
+    require_multi_xpu,
+    require_non_cpu,
+    require_non_torch_xla,
+    require_non_xpu,
+    require_npu,
+    require_pippy,
+    require_single_device,
+    require_single_gpu,
+    require_single_xpu,
+    require_torch_min_version,
+    require_tpu,
+    require_xpu,
+    skip,
+    slow,
+    torch_device,
+)
+from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
+
+
+from .scripts import test_script, test_sync, test_ops  # isort: skip
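Downstream test files consume these re-exports directly as decorators and helpers; for instance (an illustrative snippet, not from this commit):

```python
from accelerate.test_utils import require_multi_gpu, slow


@slow
@require_multi_gpu
def test_heavy_distributed_path():
    ...  # only runs when RUN_SLOW is truthy and more than one GPU is visible
```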
env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.23 kB).
env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/examples.cpython-310.pyc ADDED
Binary file (5.25 kB).
env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/testing.cpython-310.pyc ADDED
Binary file (20.3 kB).
env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/__pycache__/training.cpython-310.pyc ADDED
Binary file (4.22 kB).
env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/examples.py ADDED
@@ -0,0 +1,146 @@
+#!/usr/bin/env python
+
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+A collection of utilities for comparing `examples/complete_*_example.py` scripts with the capabilities inside of each
+`examples/by_feature` example. `compare_against_test` is the main function that should be used when testing, while the
+others are used to either get the code that matters, or to preprocess them (such as stripping comments).
+"""
+
+import os
+from typing import List
+
+
+def get_function_contents_by_name(lines: List[str], name: str):
+    """
+    Extracts a function from `lines` of segmented source code with the name `name`.
+
+    Args:
+        lines (`List[str]`):
+            Source code of a script separated by line.
+        name (`str`):
+            The name of the function to extract. Should be either `training_function` or `main`
+    """
+    if name != "training_function" and name != "main":
+        raise ValueError(f"Incorrect function name passed: {name}, choose either 'main' or 'training_function'")
+    good_lines, found_start = [], False
+    for line in lines:
+        if not found_start and f"def {name}" in line:
+            found_start = True
+            good_lines.append(line)
+            continue
+        if found_start:
+            if name == "training_function" and "def main" in line:
+                return good_lines
+            if name == "main" and "if __name__" in line:
+                return good_lines
+            good_lines.append(line)
+
+
+def clean_lines(lines: List[str]):
+    """
+    Filters `lines` and removes any entries that start with a comment ('#') or are just a newline ('\n')
+
+    Args:
+        lines (`List[str]`):
+            Source code of a script separated by line.
+    """
+    return [line for line in lines if not line.lstrip().startswith("#") and line != "\n"]
+
+
+def compare_against_test(base_filename: str, feature_filename: str, parser_only: bool, secondary_filename: str = None):
+    """
+    Tests whether the additional code inside of `feature_filename` was implemented in `base_filename`. This should be
+    used when testing to see if `complete_*_example.py` examples have all of the implementations from each of the
+    `examples/by_feature/*` scripts.
+
+    It utilizes `nlp_example.py` to extract out all of the repeated training code, so that only the new additional code
+    is examined and checked. If something *other* than `nlp_example.py` should be used, such as `cv_example.py` for the
+    `complete_cv_example.py` script, it should be passed in for the `secondary_filename` parameter.
+
+    Args:
+        base_filename (`str` or `os.PathLike`):
+            The filepath of a single "complete" example script to test, such as `examples/complete_cv_example.py`
+        feature_filename (`str` or `os.PathLike`):
+            The filepath of a single feature example script. The contents of this script are checked to see if they
+            exist in `base_filename`
+        parser_only (`bool`):
+            Whether to compare only the `main()` sections in both files, or to compare the contents of
+            `training_function()`
+        secondary_filename (`str`, *optional*):
+            A potential secondary filepath that should be included in the check. This function extracts the base
+            functionalities off of "examples/nlp_example.py", so if `base_filename` is a script other than
+            `complete_nlp_example.py`, the template script should be included here. Such as `examples/cv_example.py`
+    """
+    with open(base_filename) as f:
+        base_file_contents = f.readlines()
+    with open(os.path.abspath(os.path.join("examples", "nlp_example.py"))) as f:
+        full_file_contents = f.readlines()
+    with open(feature_filename) as f:
+        feature_file_contents = f.readlines()
+    if secondary_filename is not None:
+        with open(secondary_filename) as f:
+            secondary_file_contents = f.readlines()
+
+    # This is our base, we remove all the code from here in our `full_filename` and `feature_filename` to find the new content
+    if parser_only:
+        base_file_func = clean_lines(get_function_contents_by_name(base_file_contents, "main"))
+        full_file_func = clean_lines(get_function_contents_by_name(full_file_contents, "main"))
+        feature_file_func = clean_lines(get_function_contents_by_name(feature_file_contents, "main"))
+        if secondary_filename is not None:
+            secondary_file_func = clean_lines(get_function_contents_by_name(secondary_file_contents, "main"))
+    else:
+        base_file_func = clean_lines(get_function_contents_by_name(base_file_contents, "training_function"))
+        full_file_func = clean_lines(get_function_contents_by_name(full_file_contents, "training_function"))
+        feature_file_func = clean_lines(get_function_contents_by_name(feature_file_contents, "training_function"))
+        if secondary_filename is not None:
+            secondary_file_func = clean_lines(
+                get_function_contents_by_name(secondary_file_contents, "training_function")
+            )
+
+    _dl_line = "train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)\n"
+
+    # Specific code in our script that differs from the full version, aka what is new
+    new_feature_code = []
+    passed_idxs = []  # We keep track of the idxs just in case it's a repeated statement
+    it = iter(feature_file_func)
+    for i in range(len(feature_file_func) - 1):
+        if i not in passed_idxs:
+            line = next(it)
+            if (line not in full_file_func) and (line.lstrip() != _dl_line):
+                if "TESTING_MOCKED_DATALOADERS" not in line:
+                    new_feature_code.append(line)
+                passed_idxs.append(i)
+            else:
+                # Skip over the `config['num_epochs'] = 2` statement
+                _ = next(it)
+
+    # Extract out just the new parts from the full_file_training_func
+    new_full_example_parts = []
+    passed_idxs = []  # We keep track of the idxs just in case it's a repeated statement
+    for i, line in enumerate(base_file_func):
+        if i not in passed_idxs:
+            if (line not in full_file_func) and (line.lstrip() != _dl_line):
+                if "TESTING_MOCKED_DATALOADERS" not in line:
+                    new_full_example_parts.append(line)
+                passed_idxs.append(i)
+
+    # Finally, get the overall diff
+    diff_from_example = [line for line in new_feature_code if line not in new_full_example_parts]
+    if secondary_filename is not None:
+        diff_from_two = [line for line in full_file_contents if line not in secondary_file_func]
+        diff_from_example = [line for line in diff_from_example if line not in diff_from_two]
+
+    return diff_from_example
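Concretely, `compare_against_test` returns the lines a feature script adds that are *not* present in the complete example, so an empty diff means full coverage. One plausible invocation (the paths follow the repository layout the docstring describes):

```python
from accelerate.test_utils.examples import compare_against_test

# Does complete_nlp_example.py contain everything that the
# by_feature/checkpointing.py script layers on top of nlp_example.py?
diff = compare_against_test(
    base_filename="examples/complete_nlp_example.py",
    feature_filename="examples/by_feature/checkpointing.py",
    parser_only=False,
)
assert len(diff) == 0, f"Lines missing from the complete example: {diff}"
```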
env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_cli.py ADDED
@@ -0,0 +1,26 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import torch
+
+
+def main():
+    if torch.cuda.is_available():
+        num_gpus = torch.cuda.device_count()
+    else:
+        num_gpus = 0
+    print(f"Successfully ran on {num_gpus} GPUs")
+
+
+if __name__ == "__main__":
+    main()
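Since the point of this smoke test is to check that `accelerate launch` reaches every process, it is meant to be launched rather than imported. One plausible way to drive it from Python uses the `get_launch_command` helper defined in `testing.py` further below (the script path here is illustrative):

```python
import subprocess

from accelerate.test_utils import get_launch_command

# e.g. ["accelerate", "launch", "--num_processes=2", "test_cli.py"]
cmd = get_launch_command(num_processes=2) + ["test_cli.py"]
subprocess.run(cmd, check=True)
```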
env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_distributed_data_loop.py ADDED
@@ -0,0 +1,238 @@
+#!/usr/bin/env python
+
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import warnings
+from typing import List
+from unittest.mock import Mock
+
+import torch
+from torch.utils.data import DataLoader, IterableDataset, TensorDataset
+
+from accelerate.accelerator import Accelerator, DataLoaderConfiguration
+from accelerate.utils.dataclasses import DistributedType
+
+
+class DummyIterableDataset(IterableDataset):
+    def __init__(self, data):
+        self.data = data
+
+    def __iter__(self):
+        yield from self.data
+
+
+def create_accelerator(even_batches=True):
+    dataloader_config = DataLoaderConfiguration(even_batches=even_batches)
+    accelerator = Accelerator(dataloader_config=dataloader_config)
+    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
+    return accelerator
+
+
+def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
+    """
+    Create a simple DataLoader to use during the test cases
+    """
+    if iterable:
+        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
+    else:
+        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
+
+    dl = DataLoader(dataset, batch_size=batch_size)
+    dl = accelerator.prepare(dl)
+
+    return dl
+
+
+def verify_dataloader_batch_sizes(
+    accelerator: Accelerator,
+    dataset_size: int,
+    batch_size: int,
+    process_0_expected_batch_sizes: List[int],
+    process_1_expected_batch_sizes: List[int],
+):
+    """
+    A helper function for verifying the batch sizes coming from a prepared dataloader in each process
+    """
+    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
+
+    batch_sizes = [len(batch[0]) for batch in dl]
+
+    if accelerator.process_index == 0:
+        assert batch_sizes == process_0_expected_batch_sizes
+    elif accelerator.process_index == 1:
+        assert batch_sizes == process_1_expected_batch_sizes
+
+
+def test_default_ensures_even_batch_sizes():
+    accelerator = create_accelerator()
+
+    # without padding, we would expect a different number of batches
+    verify_dataloader_batch_sizes(
+        accelerator,
+        dataset_size=3,
+        batch_size=1,
+        process_0_expected_batch_sizes=[1, 1],
+        process_1_expected_batch_sizes=[1, 1],
+    )
+
+    # without padding, we would expect the same number of batches, but different sizes
+    verify_dataloader_batch_sizes(
+        accelerator,
+        dataset_size=7,
+        batch_size=2,
+        process_0_expected_batch_sizes=[2, 2],
+        process_1_expected_batch_sizes=[2, 2],
+    )
+
+
+def test_can_disable_even_batches():
+    accelerator = create_accelerator(even_batches=False)
+
+    verify_dataloader_batch_sizes(
+        accelerator,
+        dataset_size=3,
+        batch_size=1,
+        process_0_expected_batch_sizes=[1, 1],
+        process_1_expected_batch_sizes=[1],
+    )
+
+    verify_dataloader_batch_sizes(
+        accelerator,
+        dataset_size=7,
+        batch_size=2,
+        process_0_expected_batch_sizes=[2, 2],
+        process_1_expected_batch_sizes=[2, 1],
+    )
+
+
+def test_can_join_uneven_inputs():
+    accelerator = create_accelerator(even_batches=False)
+
+    model = torch.nn.Linear(1, 1)
+    ddp_model = accelerator.prepare(model)
+
+    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
+
+    batch_idxs = []
+    with accelerator.join_uneven_inputs([ddp_model]):
+        for batch_idx, batch in enumerate(dl):
+            output = ddp_model(batch[0].float())
+            loss = output.sum()
+            loss.backward()
+            batch_idxs.append(batch_idx)
+
+    accelerator.wait_for_everyone()
+
+    if accelerator.process_index == 0:
+        assert batch_idxs == [0, 1]
+    elif accelerator.process_index == 1:
+        assert batch_idxs == [0]
+
+
+def test_join_raises_warning_for_non_ddp_distributed(accelerator):
+    with warnings.catch_warnings(record=True) as w:
+        with accelerator.join_uneven_inputs([Mock()]):
+            pass
+
+        assert issubclass(w[-1].category, UserWarning)
+        assert "only supported for multi-GPU" in str(w[-1].message)
+
+
+def test_join_can_override_even_batches():
+    default_even_batches = True
+    overridden_even_batches = False
+    accelerator = create_accelerator(even_batches=default_even_batches)
+    model = torch.nn.Linear(1, 1)
+    ddp_model = accelerator.prepare(model)
+    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
+    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
+
+    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
+        train_dl_overridden_value = train_dl.batch_sampler.even_batches
+        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
+
+    assert train_dl_overridden_value == overridden_even_batches
+    assert valid_dl_overridden_value == overridden_even_batches
+    assert train_dl.batch_sampler.even_batches == default_even_batches
+    assert valid_dl.batch_sampler.even_batches == default_even_batches
+
+
+def test_join_can_override_for_mixed_type_dataloaders():
+    default_even_batches = True
+    overridden_even_batches = False
+    accelerator = create_accelerator(even_batches=default_even_batches)
+    model = torch.nn.Linear(1, 1)
+    ddp_model = accelerator.prepare(model)
+    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
+    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
+
+    with warnings.catch_warnings():
+        warnings.filterwarnings("ignore")
+        try:
+            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
+                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
+        except AttributeError:
+            # ensure attribute error is not raised when processing iterable dl
+            raise AssertionError
+
+    assert batch_dl_overridden_value == overridden_even_batches
+    assert batch_dl.batch_sampler.even_batches == default_even_batches
+
+
+def test_join_raises_warning_for_iterable_when_overriding_even_batches():
+    accelerator = create_accelerator()
+    model = torch.nn.Linear(1, 1)
+    ddp_model = accelerator.prepare(model)
+    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
+
+    with warnings.catch_warnings(record=True) as w:
+        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
+            pass
+
+        assert issubclass(w[-1].category, UserWarning)
+        assert "only supported for map-style datasets" in str(w[-1].message)
+
+
+def main():
+    accelerator = create_accelerator()
+
+    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
+    test_default_ensures_even_batch_sizes()
+
+    accelerator.print("Run tests with even_batches disabled")
+    test_can_disable_even_batches()
+
+    accelerator.print("Test joining uneven inputs")
+    test_can_join_uneven_inputs()
+
+    accelerator.print("Test overriding even_batches when joining uneven inputs")
+    test_join_can_override_even_batches()
+
+    accelerator.print("Test overriding even_batches for mixed dataloader types")
+    test_join_can_override_for_mixed_type_dataloaders()
+
+    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
+    test_join_raises_warning_for_iterable_when_overriding_even_batches()
+
+    accelerator.print("Test join with non DDP distributed raises warning")
+    original_state = accelerator.state.distributed_type
+    accelerator.state.distributed_type = DistributedType.FSDP
+    test_join_raises_warning_for_non_ddp_distributed(accelerator)
+    accelerator.state.distributed_type = original_state
+
+
+if __name__ == "__main__":
+    main()
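To make the expected shapes above concrete: with two processes, `even_batches=True` pads the sampler so both ranks see the same number of equally sized batches (7 samples at batch size 2 become `[2, 2]` on each rank), while `even_batches=False` leaves the short tail on the last rank (`[2, 2]` vs `[2, 1]`). A standalone sketch of that sharding rule, for illustration only (this is not the library's `BatchSamplerShard`):

```python
def shard_batches(dataset_size, batch_size, num_procs, rank, even_batches=True):
    """Round-robin batch sharding with wrap-around padding (illustrative)."""
    idxs = list(range(dataset_size))
    batches = [idxs[i : i + batch_size] for i in range(0, dataset_size, batch_size)]
    if even_batches:
        # Top up the short tail batch by wrapping around to the start, then
        # append wrapped batches until every rank receives the same count
        if len(batches[-1]) < batch_size:
            batches[-1] = batches[-1] + idxs[: batch_size - len(batches[-1])]
        while len(batches) % num_procs:
            batches.append(idxs[:batch_size])
    return batches[rank::num_procs]


assert [len(b) for b in shard_batches(7, 2, 2, rank=1)] == [2, 2]
assert [len(b) for b in shard_batches(7, 2, 2, rank=1, even_batches=False)] == [2, 1]
```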
env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_sync.py ADDED
@@ -0,0 +1,392 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from copy import deepcopy
+
+import torch
+import torch.nn.functional as F
+from torch.optim import AdamW
+from torch.optim.lr_scheduler import LambdaLR
+from torch.utils.data import DataLoader
+
+from accelerate.accelerator import Accelerator, GradientAccumulationPlugin
+from accelerate.state import GradientState
+from accelerate.test_utils import RegressionDataset, RegressionModel
+from accelerate.utils import DistributedType, set_seed
+
+
+def check_model_parameters(model_a, model_b, did_step, iteration, **kwargs):
+    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
+        if not param.requires_grad:
+            continue
+        if not did_step:
+            # Grads should not be in sync
+            assert (
+                torch.allclose(param.grad, grad_param.grad, **kwargs) is False
+            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
+        else:
+            # Grads should be in sync
+            assert (
+                torch.allclose(param.grad, grad_param.grad, **kwargs) is True
+            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
+
+
+def step_model(model, input, target, accelerator, do_backward=True):
+    model.train()
+    output = model(input)
+    loss = F.mse_loss(output, target.to(output.device))
+    if not do_backward:
+        loss /= accelerator.gradient_accumulation_steps
+        loss.backward()
+    else:
+        accelerator.backward(loss)
+
+
+def get_training_setup(accelerator, sched=False):
+    "Returns everything needed to perform basic training"
+    set_seed(42)
+    model = RegressionModel()
+    ddp_model = deepcopy(model)
+    dset = RegressionDataset(length=80)
+    dataloader = DataLoader(dset, batch_size=16)
+    model.to(accelerator.device)
+    if sched:
+        opt = AdamW(params=model.parameters(), lr=1e-3)
+        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
+        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
+        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
+    # Make a copy of `model`
+    if sched:
+        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
+    else:
+        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
+    if sched:
+        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
+    return model, ddp_model, dataloader
+
+
+def test_noop_sync(accelerator):
+    # Test when on a single CPU or GPU that the context manager does nothing
+    model, ddp_model, dataloader = get_training_setup(accelerator)
+    # Use a single batch
+    ddp_input, ddp_target = next(iter(dataloader)).values()
+    for iteration in range(3):
+        # Gather the distributed inputs and targets for the base model
+        input, target = accelerator.gather((ddp_input, ddp_target))
+        input, target = input.to(accelerator.device), target.to(accelerator.device)
+        # Perform our initial ground truth step in non "DDP"
+        step_model(model, input, target, accelerator)
+        # Do "gradient accumulation" (noop)
+        if iteration % 2 == 0:
+            # Accumulate grads locally
+            with accelerator.no_sync(ddp_model):
+                step_model(ddp_model, ddp_input, ddp_target, accelerator)
+        else:
+            # Sync grads
+            step_model(ddp_model, ddp_input, ddp_target, accelerator)
+
+        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
+        check_model_parameters(model, ddp_model, True, iteration)
+        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
+            if not param.requires_grad:
+                continue
+            assert torch.allclose(
+                param.grad, ddp_param.grad
+            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
+
+        # Shuffle ddp_input on each iteration
+        torch.manual_seed(1337 + iteration)
+        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
+
+
+def test_distributed_sync(accelerator):
+    # Test on distributed setup that context manager behaves properly
+    model, ddp_model, dataloader = get_training_setup(accelerator)
+    # Use a single batch
+    ddp_input, ddp_target = next(iter(dataloader)).values()
+    for iteration in range(3):
+        # Gather the distributed inputs and targets for the base model
+        input, target = accelerator.gather((ddp_input, ddp_target))
+        input, target = input.to(accelerator.device), target.to(accelerator.device)
+        # Perform our initial ground truth step in non "DDP"
+        step_model(model, input, target, accelerator)
+        # Do "gradient accumulation" (noop)
+        if iteration % 2 == 0:
+            # Accumulate grads locally
+            with accelerator.no_sync(ddp_model):
+                step_model(ddp_model, ddp_input, ddp_target, accelerator)
+        else:
+            # Sync grads
+            step_model(ddp_model, ddp_input, ddp_target, accelerator)
+
+        # DDP model and model should only be in sync when not (iteration % 2 == 0)
+        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
+            if not param.requires_grad:
+                continue
+            if iteration % 2 == 0:
+                # Grads should not be in sync
+                assert (
+                    torch.allclose(param.grad, ddp_param.grad) is False
+                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
+            else:
+                # Grads should be in sync
+                assert (
+                    torch.allclose(param.grad, ddp_param.grad) is True
+                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
+
+        # Shuffle ddp_input on each iteration
+        torch.manual_seed(1337 + iteration)
+        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
+
+
+def test_distributed_sync_multiple_fwd(accelerator):
+    # Test on distributed setup that context manager behaves properly when used with multiple forwards followed by multiple backwards
+    model, ddp_model, dataloader = get_training_setup(accelerator)
+    # Do multiple forwards
+    losses = []
+    num_iterations = 3
+    for iteration in range(num_iterations):
+        ddp_input, ddp_target = next(iter(dataloader)).values()
+
+        # Gather the distributed inputs and targets for the base model
+        input, target = accelerator.gather((ddp_input, ddp_target))
+        input, target = input.to(accelerator.device), target.to(accelerator.device)
+
+        # Perform our initial ground truth step in non "DDP"
+        step_model(model, input, target, accelerator)
+
+        # Accumulate grads locally
+        with accelerator.no_sync(ddp_model):
+            ddp_output = ddp_model(ddp_input)
+            loss = F.mse_loss(ddp_output, ddp_target.to(ddp_output.device))
+            losses.append(loss)
+
+    # Do multiple backwards and sync only at the last backward
+    for iteration in range(num_iterations):
+        loss = losses[iteration]
+
+        if iteration < num_iterations - 1:
+            # Accumulate grads locally
+            accelerator.backward(loss)
+
+            # DDP model and model should only be in sync after last backward
+            for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
+                if not param.requires_grad:
+                    continue
+                # Grads should not be in sync
+                assert (
+                    torch.allclose(param.grad, ddp_param.grad) is False
+                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
+
+        else:
+            # Sync grads if last backward
+            with accelerator.trigger_sync_in_backward(ddp_model):
+                accelerator.backward(loss)
+
+            # DDP model and model should only be in sync after last backward
+            for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
+                if not param.requires_grad:
+                    continue
+                # Grads should be in sync
+                assert (
+                    torch.allclose(param.grad, ddp_param.grad) is True
+                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
+
+
+def test_gradient_accumulation(split_batches=False, dispatch_batches=False, sync_each_batch=False):
+    gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=2, sync_each_batch=sync_each_batch)
+    accelerator = Accelerator(
+        split_batches=split_batches,
+        dispatch_batches=dispatch_batches,
+        gradient_accumulation_plugin=gradient_accumulation_plugin,
+    )
+    # Test that context manager behaves properly
+    model, ddp_model, dataloader = get_training_setup(accelerator)
+    for iteration, batch in enumerate(dataloader):
+        ddp_input, ddp_target = batch.values()
+        # Gather the distributed inputs and targets for the base model
+        input, target = accelerator.gather((ddp_input, ddp_target))
+        input, target = input.to(accelerator.device), target.to(accelerator.device)
+        # Perform our initial ground truth step in non "DDP"
+        step_model(model, input, target, accelerator, False)
+        # Do "gradient accumulation" (noop)
+        with accelerator.accumulate(ddp_model):
+            step_model(ddp_model, ddp_input, ddp_target, accelerator)
+
+        # DDP model and model should only be in sync when not (iteration % 2 == 0)
+        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
+            if not param.requires_grad:
+                continue
+            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1) or sync_each_batch:
+                # Grads should be in sync
+                assert (
+                    torch.allclose(param.grad, ddp_param.grad) is True
+                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
+            else:
+                # Grads should not be in sync
+                assert (
+                    torch.allclose(param.grad, ddp_param.grad) is False
+                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
+
+        # Shuffle ddp_input on each iteration
+        torch.manual_seed(1337 + iteration)
+        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
+    GradientState._reset_state()
+
+
+def test_gradient_accumulation_with_opt_and_scheduler(
+    split_batches=False, dispatch_batches=False, sync_each_batch=False
+):
+    gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=2, sync_each_batch=sync_each_batch)
+    accelerator = Accelerator(
+        split_batches=split_batches,
+        dispatch_batches=dispatch_batches,
+        gradient_accumulation_plugin=gradient_accumulation_plugin,
+    )
+    # Test that context manager behaves properly
+    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
+    for iteration, batch in enumerate(dataloader):
+        ddp_input, ddp_target = batch.values()
+        # Gather the distributed inputs and targets for the base model
+        input, target = accelerator.gather((ddp_input, ddp_target))
+        input, target = input.to(accelerator.device), target.to(accelerator.device)
+        # Perform our initial ground truth step in non "DDP"
+        model.train()
+        ddp_model.train()
+        step_model(model, input, target, accelerator, False)
+        opt.step()
+
+        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)) or sync_each_batch:
+            if split_batches:
+                sched.step()
+            else:
+                for _ in range(accelerator.num_processes):
+                    sched.step()
+
+        # Perform gradient accumulation under wrapper
+        with accelerator.accumulate(ddp_model):
+            step_model(ddp_model, ddp_input, ddp_target, accelerator)
+            ddp_opt.step()
+            ddp_sched.step()
+
+        # Learning rates should be the same
+        assert (
+            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
+        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
+        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader)) or sync_each_batch
+        if accelerator.num_processes > 1:
+            check_model_parameters(
+                model,
+                ddp_model,
+                did_step,
+                iteration,
+                rtol=1e-3,  # somehow needs a relative tolerance
+            )
+
+        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)) or sync_each_batch:
+            opt.zero_grad()  # needs to be guarded by logic as to when we should zero grads
+            ddp_opt.zero_grad()
+
+        # Shuffle ddp_input on each iteration
+        torch.manual_seed(1337 + iteration)
+    GradientState._reset_state()
+
+
+def test_dataloader_break():
+    accelerator = Accelerator()
+
+    first_dset = RegressionDataset(length=80)
+    first_dataloader = DataLoader(first_dset, batch_size=16)
+    second_dset = RegressionDataset(length=96)
+    second_dataloader = DataLoader(second_dset, batch_size=16)
+    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
+    assert accelerator.gradient_state.active_dataloader is None
+    for iteration, _ in enumerate(first_dataloader):
+        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
+        if iteration < len(first_dataloader) - 1:
+            assert not accelerator.gradient_state.end_of_dataloader
+            if iteration == 1:
+                for batch_num, _ in enumerate(second_dataloader):
+                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
+                    if batch_num < len(second_dataloader) - 1:
+                        assert not accelerator.gradient_state.end_of_dataloader
+                    else:
+                        assert accelerator.gradient_state.end_of_dataloader
+        else:
+            assert accelerator.gradient_state.end_of_dataloader
+    assert accelerator.gradient_state.active_dataloader is None
+
+
+def main():
+    accelerator = Accelerator()
+    state = accelerator.state
+    if state.local_process_index == 0:
+        print("**Test `accumulate` gradient accumulation with dataloader break**")
+    if state.distributed_type != DistributedType.XLA:
+        test_dataloader_break()
+    if state.distributed_type == DistributedType.NO:
+        if state.local_process_index == 0:
+            print("**Test NOOP `no_sync` context manager**")
+        test_noop_sync(accelerator)
+    if state.distributed_type in (
+        DistributedType.MULTI_GPU,
+        DistributedType.MULTI_NPU,
+        DistributedType.MULTI_MLU,
+        DistributedType.MULTI_CPU,
+    ):
+        if state.local_process_index == 0:
+            print("**Test Distributed `no_sync` context manager**")
+        test_distributed_sync(accelerator)
+        if state.local_process_index == 0:
+            print("**Test Distributed `no_sync` context manager with multiple forwards**")
+        test_distributed_sync_multiple_fwd(accelerator)
+    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU):
+        for split_batch in [True, False]:
+            for dispatch_batches in [True, False]:
+                for sync_each_batch in [True, False]:
+                    if state.local_process_index == 0:
+                        print(
+                            "**Test `accumulate` gradient accumulation, ",
+                            f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}` and `sync_each_batch={sync_each_batch}`**",
+                        )
+                    test_gradient_accumulation(split_batch, dispatch_batches, sync_each_batch)
+
+    # Currently will break on torch 2.0 +, need to investigate why
+    if state.local_process_index == 0:
+        print(
+            "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
+            "`split_batches=False`, `dispatch_batches=False`, `sync_each_batch=False`**",
+        )
+    test_gradient_accumulation_with_opt_and_scheduler()
+    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU):
+        for split_batch in [True, False]:
+            for dispatch_batches in [True, False]:
+                for sync_each_batch in [True, False]:
+                    if not split_batch and not dispatch_batches and not sync_each_batch:
+                        continue
+                    if state.local_process_index == 0:
+                        print(
+                            "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
+                            f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}` and `sync_each_batch={sync_each_batch}`**",
+                        )
+                    test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches, sync_each_batch)
+
+
+def _mp_fn(index):
+    # For xla_spawn (TPUs)
+    main()
+
+
+if __name__ == "__main__":
+    main()
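The user-facing API these tests exercise is `Accelerator.accumulate`, which toggles `GradientState.sync_gradients` so that DDP only all-reduces on step boundaries. A condensed, self-contained sketch of the intended training-loop usage (toy model and data, not from the commit):

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataloader = DataLoader(TensorDataset(torch.randn(16, 1), torch.randn(16, 1)), batch_size=4)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for x, y in dataloader:
    # Gradients are synced (and the optimizer really steps) only every
    # `gradient_accumulation_steps` batches, or at the end of the dataloader
    with accelerator.accumulate(model):
        loss = torch.nn.functional.mse_loss(model(x), y)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
```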
env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/testing.py ADDED
@@ -0,0 +1,605 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import asyncio
+ import inspect
+ import os
+ import shutil
+ import subprocess
+ import sys
+ import tempfile
+ import unittest
+ from contextlib import contextmanager
+ from functools import partial
+ from pathlib import Path
+ from typing import List, Union
+ from unittest import mock
+
+ import torch
+
+ import accelerate
+
+ from ..state import AcceleratorState, PartialState
+ from ..utils import (
+     gather,
+     is_bnb_available,
+     is_clearml_available,
+     is_comet_ml_available,
+     is_cuda_available,
+     is_datasets_available,
+     is_deepspeed_available,
+     is_dvclive_available,
+     is_mlu_available,
+     is_mps_available,
+     is_npu_available,
+     is_pandas_available,
+     is_pippy_available,
+     is_tensorboard_available,
+     is_timm_available,
+     is_torch_version,
+     is_torch_xla_available,
+     is_transformers_available,
+     is_wandb_available,
+     is_xpu_available,
+     str_to_bool,
+ )
+
+
+ def get_backend():
+     if is_torch_xla_available():
+         return "xla", torch.cuda.device_count(), torch.cuda.memory_allocated
+     elif is_cuda_available():
+         return "cuda", torch.cuda.device_count(), torch.cuda.memory_allocated
+     elif is_mps_available():
+         return "mps", 1, torch.mps.current_allocated_memory()
+     elif is_mlu_available():
+         return "mlu", torch.mlu.device_count(), torch.mlu.memory_allocated
+     elif is_npu_available():
+         return "npu", torch.npu.device_count(), torch.npu.memory_allocated
+     elif is_xpu_available():
+         return "xpu", torch.xpu.device_count(), torch.xpu.memory_allocated
+     else:
+         return "cpu", 1, 0
+
+
+ torch_device, device_count, memory_allocated_func = get_backend()
+
+
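# Editor's sketch (not part of the uploaded file): how the module-level
# triple is typically consumed. Note the asymmetry preserved above: on
# CUDA/MLU/NPU/XPU `memory_allocated_func` is a callable, while on CPU it
# is the constant 0 and on MPS it is an already-evaluated byte count.
import torch
from accelerate.test_utils.testing import device_count, memory_allocated_func, torch_device

x = torch.ones(4, device=torch_device)  # place a tensor on the detected backend
if callable(memory_allocated_func):
    print(f"{device_count} device(s), {memory_allocated_func()} bytes allocated")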
+ def get_launch_command(**kwargs) -> list:
+     """
+     Wraps around `kwargs` to help simplify launching from `subprocess`.
+
+     Example:
+     ```python
+     # returns ['accelerate', 'launch', '--num_processes=2', '--device_count=2']
+     get_launch_command(num_processes=2, device_count=2)
+     ```
+     """
+     command = ["accelerate", "launch"]
+     for k, v in kwargs.items():
+         if isinstance(v, bool) and v:
+             command.append(f"--{k}")
+         elif v is not None:
+             command.append(f"--{k}={v}")
+     return command
+
+
+ DEFAULT_LAUNCH_COMMAND = get_launch_command(num_processes=device_count)
+
+
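# Editor's sketch: `get_launch_command` only builds an argv list; one way to
# run it is through `subprocess` (the target script name is hypothetical).
# Truthy booleans become bare flags, other non-None values become `--k=v`.
import subprocess

from accelerate.test_utils.testing import get_launch_command

cmd = get_launch_command(num_processes=2, debug=True) + ["my_script.py"]
# -> ['accelerate', 'launch', '--num_processes=2', '--debug', 'my_script.py']
subprocess.run(cmd, check=True)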
+ def parse_flag_from_env(key, default=False):
+     try:
+         value = os.environ[key]
+     except KeyError:
+         # KEY isn't set, default to `default`.
+         _value = default
+     else:
+         # KEY is set, convert it to True or False.
+         try:
+             _value = str_to_bool(value)
+         except ValueError:
+             # More values are supported, but let's keep the message simple.
+             raise ValueError(f"If set, {key} must be yes or no.")
+     return _value
+
+
+ _run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
+
+
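# Editor's sketch: RUN_SLOW above follows the general convention; any env var
# name works (the one below is hypothetical). Unset -> `default`, otherwise
# the value must parse through `str_to_bool` ("yes"/"no", "true"/"false", "1"/"0").
import os

from accelerate.test_utils.testing import parse_flag_from_env

os.environ["MY_TEST_FLAG"] = "yes"
assert parse_flag_from_env("MY_TEST_FLAG")                # truthy: str_to_bool("yes") -> 1
assert not parse_flag_from_env("UNSET_FLAG", default=False)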
+ def skip(test_case):
+     "Decorator that skips a test unconditionally"
+     return unittest.skip("Test was skipped")(test_case)
+
+
+ def slow(test_case):
+     """
+     Decorator marking a test as slow. Slow tests are skipped by default. Set the RUN_SLOW environment variable to a
+     truthy value to run them.
+     """
+     return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
+
+
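# Editor's sketch of the decorator in use; the class and method names are
# hypothetical. The body only runs when RUN_SLOW parses truthy.
import unittest

from accelerate.test_utils.testing import slow

class ExampleSuite(unittest.TestCase):
    @slow
    def test_full_finetune(self):
        ...  # expensive path, skipped by default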
+ def require_cpu(test_case):
+     """
+     Decorator marking a test that must be run only on the CPU. These tests are skipped when a GPU is available.
+     """
+     return unittest.skipUnless(torch_device == "cpu", "test requires only a CPU")(test_case)
+
+
+ def require_non_cpu(test_case):
+     """
+     Decorator marking a test that requires a hardware accelerator backend. These tests are skipped when no hardware
+     accelerator is available.
+     """
+     return unittest.skipUnless(torch_device != "cpu", "test requires a hardware accelerator")(test_case)
+
+
+ def require_cuda(test_case):
+     """
+     Decorator marking a test that requires CUDA. These tests are skipped when there is no GPU available or when
+     TorchXLA is available.
+     """
+     return unittest.skipUnless(is_cuda_available() and not is_torch_xla_available(), "test requires a GPU")(test_case)
+
+
+ def require_xpu(test_case):
+     """
+     Decorator marking a test that requires XPU. These tests are skipped when there is no XPU available.
+     """
+     return unittest.skipUnless(is_xpu_available(), "test requires an XPU")(test_case)
+
+
+ def require_non_xpu(test_case):
+     """
+     Decorator marking a test that should be skipped for XPU.
+     """
+     return unittest.skipUnless(torch_device != "xpu", "test requires a non-XPU")(test_case)
+
+
+ def require_mlu(test_case):
+     """
+     Decorator marking a test that requires MLU. These tests are skipped when there is no MLU available.
+     """
+     return unittest.skipUnless(is_mlu_available(), "test requires an MLU")(test_case)
+
+
+ def require_npu(test_case):
+     """
+     Decorator marking a test that requires NPU. These tests are skipped when there is no NPU available.
+     """
+     return unittest.skipUnless(is_npu_available(), "test requires an NPU")(test_case)
+
+
+ def require_mps(test_case):
+     """
+     Decorator marking a test that requires the MPS backend. These tests are skipped when torch doesn't support the
+     `mps` backend.
+     """
+     return unittest.skipUnless(is_mps_available(), "test requires `mps` backend support in `torch`")(test_case)
+
+
+ def require_huggingface_suite(test_case):
+     """
+     Decorator marking a test that requires transformers and datasets. These tests are skipped when they are not
+     installed.
+     """
+     return unittest.skipUnless(
+         is_transformers_available() and is_datasets_available(),
+         "test requires the Hugging Face suite",
+     )(test_case)
+
+
+ def require_transformers(test_case):
+     """
+     Decorator marking a test that requires transformers. These tests are skipped when it is not installed.
+     """
+     return unittest.skipUnless(is_transformers_available(), "test requires the transformers library")(test_case)
+
+
+ def require_timm(test_case):
+     """
+     Decorator marking a test that requires timm. These tests are skipped when it is not installed.
+     """
+     return unittest.skipUnless(is_timm_available(), "test requires the timm library")(test_case)
+
+
+ def require_bnb(test_case):
+     """
+     Decorator marking a test that requires bitsandbytes. These tests are skipped when it is not installed.
+     """
+     return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)
+
+
+ def require_tpu(test_case):
+     """
+     Decorator marking a test that requires TPUs. These tests are skipped when there are no TPUs available.
+     """
+     return unittest.skipUnless(is_torch_xla_available(check_is_tpu=True), "test requires TPU")(test_case)
+
+
+ def require_non_torch_xla(test_case):
+     """
+     Decorator marking a test as requiring an environment without TorchXLA. These tests are skipped when TorchXLA is
+     available.
+     """
+     return unittest.skipUnless(not is_torch_xla_available(), "test requires an env without TorchXLA")(test_case)
+
+
+ def require_single_device(test_case):
+     """
+     Decorator marking a test that requires a single device. These tests are skipped when there is no hardware
+     accelerator available or the number of devices is more than one.
+     """
+     return unittest.skipUnless(torch_device != "cpu" and device_count == 1, "test requires a hardware accelerator")(
+         test_case
+     )
+
+
+ def require_single_gpu(test_case):
+     """
+     Decorator marking a test that requires CUDA on a single GPU. These tests are skipped when there is no GPU
+     available or the number of GPUs is more than one.
+     """
+     return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)
+
+
+ def require_single_xpu(test_case):
+     """
+     Decorator marking a test that requires a single XPU. These tests are skipped when there is no XPU available or
+     the number of XPUs is more than one.
+     """
+     return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires an XPU")(test_case)
+
+
+ def require_multi_device(test_case):
+     """
+     Decorator marking a test that requires a multi-device setup. These tests are skipped on a machine without
+     multiple devices.
+     """
+     return unittest.skipUnless(device_count > 1, "test requires multiple hardware accelerators")(test_case)
+
+
+ def require_multi_gpu(test_case):
+     """
+     Decorator marking a test that requires a multi-GPU setup. These tests are skipped on a machine without multiple
+     GPUs.
+     """
+     return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
+
+
+ def require_multi_xpu(test_case):
+     """
+     Decorator marking a test that requires a multi-XPU setup. These tests are skipped on a machine without multiple
+     XPUs.
+     """
+     return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
+
+
+ def require_deepspeed(test_case):
+     """
+     Decorator marking a test that requires DeepSpeed installed. These tests are skipped when DeepSpeed isn't
+     installed.
+     """
+     return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)
+
+
+ def require_fsdp(test_case):
+     """
+     Decorator marking a test that requires FSDP installed. These tests are skipped when FSDP isn't installed.
+     """
+     return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)
+
+
+ def require_torch_min_version(test_case=None, version=None):
+     """
+     Decorator marking that a test requires a particular torch version to be tested. These tests are skipped when an
+     installed torch version is less than the required one.
+     """
+     if test_case is None:
+         return partial(require_torch_min_version, version=version)
+     return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)
+
+
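# Editor's note: because the bare-decorator case recurses through
# `functools.partial` when `test_case` is None, `version` must be passed by
# keyword. A sketch (class and method names hypothetical):
import unittest

from accelerate.test_utils.testing import require_torch_min_version

class TorchVersionTests(unittest.TestCase):
    @require_torch_min_version(version="2.0.0")
    def test_torch2_feature(self):
        ...  # skipped when the installed torch is older than 2.0.0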
+ def require_tensorboard(test_case):
+     """
+     Decorator marking a test that requires tensorboard installed. These tests are skipped when tensorboard isn't
+     installed.
+     """
+     return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)
+
+
+ def require_wandb(test_case):
+     """
+     Decorator marking a test that requires wandb installed. These tests are skipped when wandb isn't installed.
+     """
+     return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
+
+
+ def require_comet_ml(test_case):
+     """
+     Decorator marking a test that requires comet_ml installed. These tests are skipped when comet_ml isn't installed.
+     """
+     return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
+
+
+ def require_clearml(test_case):
+     """
+     Decorator marking a test that requires clearml installed. These tests are skipped when clearml isn't installed.
+     """
+     return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)
+
+
+ def require_dvclive(test_case):
+     """
+     Decorator marking a test that requires dvclive installed. These tests are skipped when dvclive isn't installed.
+     """
+     return unittest.skipUnless(is_dvclive_available(), "test requires dvclive")(test_case)
+
+
+ def require_pandas(test_case):
+     """
+     Decorator marking a test that requires pandas installed. These tests are skipped when pandas isn't installed.
+     """
+     return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)
+
+
+ def require_pippy(test_case):
+     """
+     Decorator marking a test that requires pippy installed. These tests are skipped when pippy isn't installed.
+     """
+     return unittest.skipUnless(is_pippy_available(), "test requires pippy")(test_case)
+
+
+ _atleast_one_tracker_available = (
+     any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
+ )
+
+
+ def require_trackers(test_case):
+     """
+     Decorator marking that a test requires at least one tracking library installed. These tests are skipped when none
+     are installed.
+     """
+     return unittest.skipUnless(
+         _atleast_one_tracker_available,
+         "test requires at least one tracker to be available and for `comet_ml` to not be installed",
+     )(test_case)
+
+
+ class TempDirTestCase(unittest.TestCase):
+     """
+     A TestCase class that keeps a single `tempfile.TemporaryDirectory` open for the duration of the class, wipes its
+     data at the start of a test, and then destroys it at the end of the TestCase.
+
+     Useful for when a class or API requires a single constant folder throughout its use, such as Weights and Biases.
+
+     The temporary directory location will be stored in `self.tmpdir`.
+     """
+
+     clear_on_setup = True
+
+     @classmethod
+     def setUpClass(cls):
+         "Creates a `tempfile.TemporaryDirectory` and stores it in `cls.tmpdir`"
+         cls.tmpdir = Path(tempfile.mkdtemp())
+
+     @classmethod
+     def tearDownClass(cls):
+         "Remove `cls.tmpdir` after test suite has finished"
+         if os.path.exists(cls.tmpdir):
+             shutil.rmtree(cls.tmpdir)
+
+     def setUp(self):
+         "Destroy all contents in `self.tmpdir`, but not `self.tmpdir`"
+         if self.clear_on_setup:
+             for path in self.tmpdir.glob("**/*"):
+                 if path.is_file():
+                     path.unlink()
+                 elif path.is_dir():
+                     shutil.rmtree(path)
+
+
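# Editor's sketch of a subclass (names hypothetical). Setting
# `clear_on_setup = False` keeps earlier tests' artifacts in the class-wide
# `self.tmpdir` instead of wiping it before each test.
from accelerate.test_utils.testing import TempDirTestCase

class TrackerSmokeTests(TempDirTestCase):
    clear_on_setup = False  # accumulate run folders across tests

    def test_writes_log(self):
        (self.tmpdir / "run.log").write_text("ok")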
+ class AccelerateTestCase(unittest.TestCase):
+     """
+     A TestCase class that will reset the accelerator state at the end of every test. Every test that checks or
+     utilizes the `AcceleratorState` class should inherit from this to avoid silent failures due to state being shared
+     between tests.
+     """
+
+     def tearDown(self):
+         super().tearDown()
+         # Reset the state of the AcceleratorState singleton.
+         AcceleratorState._reset_state()
+         PartialState._reset_state()
+
+
+ class MockingTestCase(unittest.TestCase):
+     """
+     A TestCase class designed to dynamically add various mockers that should be used in every test, mimicking the
+     behavior of a class-wide mock when defining one normally will not do.
+
+     Useful when a mock requires specific information that only becomes available after `TestCase.setUpClass`, such as
+     setting an environment variable with that information.
+
+     The `add_mocks` function should be run at the end of a `TestCase`'s `setUp` function, after a call to
+     `super().setUp()`, such as:
+     ```python
+     def setUp(self):
+         super().setUp()
+         mocks = mock.patch.dict(os.environ, {"SOME_ENV_VAR": "SOME_VALUE"})
+         self.add_mocks(mocks)
+     ```
+     """
+
+     def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
+         """
+         Add custom mocks for tests that should be repeated on each test. Should be called during
+         `MockingTestCase.setUp`, after `super().setUp()`.
+
+         Args:
+             mocks (`mock.Mock` or list of `mock.Mock`):
+                 Mocks that should be added to the `TestCase` after `TestCase.setUpClass` has been run
+         """
+         self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
+         for m in self.mocks:
+             m.start()
+             self.addCleanup(m.stop)
+
+
+ def are_the_same_tensors(tensor):
+     state = AcceleratorState()
+     tensor = tensor[None].clone().to(state.device)
+     tensors = gather(tensor).cpu()
+     tensor = tensor[0].cpu()
+     for i in range(tensors.shape[0]):
+         if not torch.equal(tensors[i], tensor):
+             return False
+     return True
+
+
+ class _RunOutput:
+     def __init__(self, returncode, stdout, stderr):
+         self.returncode = returncode
+         self.stdout = stdout
+         self.stderr = stderr
+
+
+ async def _read_stream(stream, callback):
+     while True:
+         line = await stream.readline()
+         if line:
+             callback(line)
+         else:
+             break
+
+
+ async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
+     if echo:
+         print("\nRunning: ", " ".join(cmd))
+
+     p = await asyncio.create_subprocess_exec(
+         cmd[0],
+         *cmd[1:],
+         stdin=stdin,
+         stdout=asyncio.subprocess.PIPE,
+         stderr=asyncio.subprocess.PIPE,
+         env=env,
+     )
+
+     # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
+     # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
+     #
+     # If it starts hanging, will need to switch to the following code. The problem is that no data
+     # will be seen until it's done and if it hangs for example there will be no debug info.
+     # out, err = await p.communicate()
+     # return _RunOutput(p.returncode, out, err)
+
+     out = []
+     err = []
+
+     def tee(line, sink, pipe, label=""):
+         line = line.decode("utf-8").rstrip()
+         sink.append(line)
+         if not quiet:
+             print(label, line, file=pipe)
+
+     # XXX: the timeout doesn't seem to make any difference here
+     await asyncio.wait(
+         [
+             asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
+             asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
+         ],
+         timeout=timeout,
+     )
+     return _RunOutput(await p.wait(), out, err)
+
+
+ def execute_subprocess_async(cmd: list, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
+     # Cast every path in `cmd` to a string
+     for i, c in enumerate(cmd):
+         if isinstance(c, Path):
+             cmd[i] = str(c)
+     loop = asyncio.get_event_loop()
+     result = loop.run_until_complete(
+         _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
+     )
+
+     cmd_str = " ".join(cmd)
+     if result.returncode > 0:
+         stderr = "\n".join(result.stderr)
+         raise RuntimeError(
+             f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
+             f"The combined stderr from workers follows:\n{stderr}"
+         )
+
+     return result
+
+
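# Editor's sketch: the usual pairing with the launch helpers above; the
# target script path is hypothetical. A non-zero returncode raises
# RuntimeError carrying the workers' combined stderr.
import os

from accelerate.test_utils.testing import DEFAULT_LAUNCH_COMMAND, execute_subprocess_async

cmd = DEFAULT_LAUNCH_COMMAND + ["tests/test_script.py"]
result = execute_subprocess_async(cmd, env=os.environ.copy())
print(result.returncode, "\n".join(result.stdout))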
+ class SubprocessCallException(Exception):
+     pass
+
+
+ def run_command(command: List[str], return_stdout=False, env=None):
+     """
+     Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly
+     capture if an error occurred while running `command`.
+     """
+     # Cast every path in `command` to a string
+     for i, c in enumerate(command):
+         if isinstance(c, Path):
+             command[i] = str(c)
+     if env is None:
+         env = os.environ.copy()
+     try:
+         output = subprocess.check_output(command, stderr=subprocess.STDOUT, env=env)
+         if return_stdout:
+             if hasattr(output, "decode"):
+                 output = output.decode("utf-8")
+             return output
+     except subprocess.CalledProcessError as e:
+         raise SubprocessCallException(
+             f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
+         ) from e
+
+
+ def path_in_accelerate_package(*components: str) -> Path:
+     """
+     Get a path within the `accelerate` package's directory.
+
+     Args:
+         *components: Components of the path to join after the package directory.
+
+     Returns:
+         `Path`: The path to the requested file or directory.
+     """
+
+     accelerate_package_dir = Path(inspect.getfile(accelerate)).parent
+     return accelerate_package_dir.joinpath(*components)
+
+
+ @contextmanager
+ def assert_exception(exception_class: Exception, msg: str = None) -> bool:
+     """
+     Context manager to assert that the right `Exception` class was raised.
+
+     If `msg` is provided, will check that the message is contained in the raised exception.
+     """
+     was_ran = False
+     try:
+         yield
+         was_ran = True
+     except Exception as e:
+         assert isinstance(e, exception_class), f"Expected exception of type {exception_class} but got {type(e)}"
+         if msg is not None:
+             assert msg in str(e), f"Expected message '{msg}' to be in exception but got '{str(e)}'"
+     if was_ran:
+         raise AssertionError(f"Expected exception of type {exception_class} but ran without issue.")
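# Editor's sketch: unlike a bare try/except, `assert_exception` also fails
# when the body raises nothing at all.
from accelerate.test_utils.testing import assert_exception

with assert_exception(ValueError, msg="invalid literal"):
    int("not a number")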
env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/training.py ADDED
@@ -0,0 +1,101 @@
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import numpy as np
+ import torch
+ from torch.utils.data import DataLoader
+
+ from accelerate.utils.dataclasses import DistributedType
+
+
+ class RegressionDataset:
+     def __init__(self, a=2, b=3, length=64, seed=None):
+         rng = np.random.default_rng(seed)
+         self.length = length
+         self.x = rng.normal(size=(length,)).astype(np.float32)
+         self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)
+
+     def __len__(self):
+         return self.length
+
+     def __getitem__(self, i):
+         return {"x": self.x[i], "y": self.y[i]}
+
+
+ class RegressionModel4XPU(torch.nn.Module):
+     def __init__(self, a=0, b=0, double_output=False):
+         super().__init__()
+         self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
+         self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
+         self.first_batch = True
+
+     def forward(self, x=None):
+         if self.first_batch:
+             print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
+             self.first_batch = False
+         return x * self.a[0] + self.b[0]
+
+
+ class RegressionModel(torch.nn.Module):
+     def __init__(self, a=0, b=0, double_output=False):
+         super().__init__()
+         self.a = torch.nn.Parameter(torch.tensor(a).float())
+         self.b = torch.nn.Parameter(torch.tensor(b).float())
+         self.first_batch = True
+
+     def forward(self, x=None):
+         if self.first_batch:
+             print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
+             self.first_batch = False
+         return x * self.a + self.b
+
+
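# Editor's sketch: how the regression pair above fits together in a tiny
# training loop (hyperparameters illustrative).
import torch
from torch.utils.data import DataLoader

from accelerate.test_utils.training import RegressionDataset, RegressionModel

dataset = RegressionDataset(length=128, seed=42)
loader = DataLoader(dataset, batch_size=16)
model = RegressionModel(a=0.0, b=0.0)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
for batch in loader:
    loss = torch.nn.functional.mse_loss(model(batch["x"]), batch["y"])
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()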
+ def mocked_dataloaders(accelerator, batch_size: int = 16):
+     from datasets import load_dataset
+     from transformers import AutoTokenizer
+
+     tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
+     data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
+     datasets = load_dataset("csv", data_files=data_files)
+     label_list = datasets["train"].unique("label")
+
+     label_to_id = {v: i for i, v in enumerate(label_list)}
+
+     def tokenize_function(examples):
+         # max_length=None => use the model max length (it's actually the default)
+         outputs = tokenizer(
+             examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
+         )
+         if "label" in examples:
+             outputs["labels"] = [label_to_id[l] for l in examples["label"]]
+         return outputs
+
+     # Apply the method we just defined to all the examples in all the splits of the dataset
+     tokenized_datasets = datasets.map(
+         tokenize_function,
+         batched=True,
+         remove_columns=["sentence1", "sentence2", "label"],
+     )
+
+     def collate_fn(examples):
+         # On TPU it's best to pad everything to the same length or training will be very slow.
+         if accelerator.distributed_type == DistributedType.XLA:
+             return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
+         return tokenizer.pad(examples, padding="longest", return_tensors="pt")
+
+     # Instantiate dataloaders.
+     train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
+     eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)
+
+     return train_dataloader, eval_dataloader
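# Editor's sketch: `mocked_dataloaders` assumes `datasets` and `transformers`
# are installed and that the MRPC sample CSVs exist at the relative paths
# hard-coded above, i.e. it is meant to run from the accelerate repo root.
from accelerate import Accelerator

accelerator = Accelerator()
train_dl, eval_dl = mocked_dataloaders(accelerator)
train_dl, eval_dl = accelerator.prepare(train_dl, eval_dl)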