applied-ai-018 committed
Commit 0c481ef · verified · 1 parent: a1ea65c

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .gitattributes +2 -0
  2. env-llmeval/bin/f2py +8 -0
  3. env-llmeval/bin/python3 +3 -0
  4. env-llmeval/bin/python3.10 +3 -0
  5. env-llmeval/lib/python3.10/site-packages/__editable___lm_eval_0_4_2_finder.py +79 -0
  6. env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/__init__.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/big_modeling.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/checkpointing.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/data_loader.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/hooks.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/inference.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/launchers.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/local_sgd.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/logging.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/memory_utils.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/optimizer.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/scheduler.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/state.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/tracking.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/accelerate/accelerator.py +0 -0
  22. env-llmeval/lib/python3.10/site-packages/accelerate/big_modeling.py +622 -0
  23. env-llmeval/lib/python3.10/site-packages/accelerate/commands/__init__.py +13 -0
  24. env-llmeval/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py +50 -0
  25. env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/__init__.py +52 -0
  26. env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/cluster.py +705 -0
  27. env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/config.py +89 -0
  28. env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/config_args.py +243 -0
  29. env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/default.py +133 -0
  30. env-llmeval/lib/python3.10/site-packages/accelerate/commands/env.py +107 -0
  31. env-llmeval/lib/python3.10/site-packages/accelerate/commands/estimate.py +309 -0
  32. env-llmeval/lib/python3.10/site-packages/accelerate/commands/launch.py +1085 -0
  33. env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__init__.py +14 -0
  34. env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/__init__.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/cursor.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/helpers.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/input.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/keymap.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/selection_menu.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/cursor.py +65 -0
  41. env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/helpers.py +59 -0
  42. env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/input.py +86 -0
  43. env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/keymap.py +133 -0
  44. env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/selection_menu.py +144 -0
  45. env-llmeval/lib/python3.10/site-packages/accelerate/commands/test.py +65 -0
  46. env-llmeval/lib/python3.10/site-packages/accelerate/commands/tpu.py +157 -0
  47. env-llmeval/lib/python3.10/site-packages/accelerate/commands/utils.py +120 -0
  48. env-llmeval/lib/python3.10/site-packages/accelerate/data_loader.py +1093 -0
  49. env-llmeval/lib/python3.10/site-packages/accelerate/hooks.py +709 -0
  50. env-llmeval/lib/python3.10/site-packages/accelerate/inference.py +188 -0
.gitattributes CHANGED
@@ -115,3 +115,5 @@ llmeval-env/lib/python3.10/site-packages/torch/lib/libc10.so filter=lfs diff=lfs
  llmeval-env/lib/python3.10/site-packages/torch/lib/libtorch_python.so filter=lfs diff=lfs merge=lfs -text
  llmeval-env/lib/python3.10/site-packages/torch/lib/libcusparseLt-f80c68d1.so.0 filter=lfs diff=lfs merge=lfs -text
  llmeval-env/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so filter=lfs diff=lfs merge=lfs -text
+ env-llmeval/bin/python3 filter=lfs diff=lfs merge=lfs -text
+ env-llmeval/bin/python3.10 filter=lfs diff=lfs merge=lfs -text
env-llmeval/bin/f2py ADDED
@@ -0,0 +1,8 @@
+ #!/home/sdp/llm_eval/env-llmeval/bin/python3
+ # -*- coding: utf-8 -*-
+ import re
+ import sys
+ from numpy.f2py.f2py2e import main
+ if __name__ == '__main__':
+     sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+     sys.exit(main())
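
The wrapper above is the standard pip console-script shim: it normalizes `sys.argv[0]` before handing control to `numpy.f2py`. A minimal sketch of what that `re.sub` accomplishes (illustrative argv values, not from this commit):

```python
import re

# The shim strips a trailing "-script.pyw" or ".exe" (Windows launcher
# artifacts) from argv[0] so f2py's help output shows a clean program name.
for argv0 in ["/usr/bin/f2py", "f2py-script.pyw", "f2py.exe"]:
    print(re.sub(r'(-script\.pyw|\.exe)?$', '', argv0))
# -> /usr/bin/f2py, f2py, f2py
```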
env-llmeval/bin/python3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45692c3da2492563eabf0a8f5dc18d20dc9c34ffe3a18202563e00bae684be91
+ size 5904904
env-llmeval/bin/python3.10 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45692c3da2492563eabf0a8f5dc18d20dc9c34ffe3a18202563e00bae684be91
+ size 5904904
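
Both interpreter binaries are checked in as Git LFS pointers rather than raw bytes; the identical `oid` shows that `python3` and `python3.10` are the same 5.9 MB file. A hedged sketch (not the git-lfs implementation) of parsing this three-line pointer format:

```python
# Toy parser for the key-value pointer format above; real git-lfs also
# validates the version line and the sha256 before fetching the object.
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:45692c3da2492563eabf0a8f5dc18d20dc9c34ffe3a18202563e00bae684be91\n"
    "size 5904904\n"
)
fields = parse_lfs_pointer(pointer)
print(fields["oid"], int(fields["size"]))  # sha256:4569... 5904904
```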
env-llmeval/lib/python3.10/site-packages/__editable___lm_eval_0_4_2_finder.py ADDED
@@ -0,0 +1,79 @@
+ import sys
+ from importlib.machinery import ModuleSpec, PathFinder
+ from importlib.machinery import all_suffixes as module_suffixes
+ from importlib.util import spec_from_file_location
+ from itertools import chain
+ from pathlib import Path
+
+ MAPPING = {'lm_eval': '/home/sdp/llm_eval/lm-evaluation/lm_eval'}
+ NAMESPACES = {'lm_eval.caching': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/caching'], 'lm_eval.tasks.agieval': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/agieval'], 'lm_eval.tasks.openbookqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/openbookqa'], 'lm_eval.tasks.aexams': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/aexams'], 'lm_eval.tasks.wmdp': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/wmdp'], 'lm_eval.tasks.blimp': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/blimp'], 'lm_eval.tasks.swag': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/swag'], 'lm_eval.tasks.bigbench': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/bigbench'], 'lm_eval.tasks.lambada': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/lambada'], 'lm_eval.tasks.hellaswag': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/hellaswag'], 'lm_eval.tasks.mgsm': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mgsm'], 'lm_eval.tasks.xwinograd': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/xwinograd'], 'lm_eval.tasks.tmmluplus': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/tmmluplus'], 'lm_eval.tasks.babi': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/babi'], 'lm_eval.tasks.xstorycloze': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/xstorycloze'], 'lm_eval.tasks.haerae': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/haerae'], 'lm_eval.tasks.model_written_evals': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/model_written_evals'], 'lm_eval.tasks.kmmlu': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/kmmlu'], 'lm_eval.tasks.arithmetic': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/arithmetic'], 'lm_eval.tasks.gsm8k': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/gsm8k'], 'lm_eval.tasks.prost': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/prost'], 'lm_eval.tasks.basqueglue': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/basqueglue'], 'lm_eval.tasks.drop': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/drop'], 'lm_eval.tasks.french_bench': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/french_bench'], 'lm_eval.tasks.race': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/race'], 'lm_eval.tasks.medmcqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/medmcqa'], 'lm_eval.tasks.eus_exams': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/eus_exams'], 'lm_eval.tasks.scrolls': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/scrolls'], 'lm_eval.tasks.arc': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/arc'], 'lm_eval.tasks.eus_proficiency': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/eus_proficiency'], 'lm_eval.tasks.bbh': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/bbh'], 'lm_eval.tasks.pile': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/pile'], 'lm_eval.tasks.headqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/headqa'], 'lm_eval.tasks.kobest': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/kobest'], 'lm_eval.tasks.wsc273': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/wsc273'], 'lm_eval.tasks.siqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/siqa'], 'lm_eval.tasks.sciq': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/sciq'], 'lm_eval.tasks.wmt2016': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/wmt2016'], 'lm_eval.tasks.wikitext': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/wikitext'], 'lm_eval.tasks.minerva_math': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/minerva_math'], 'lm_eval.tasks.paws-x': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/paws-x'], 'lm_eval.tasks.lambada_multilingual': 
['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/lambada_multilingual'], 'lm_eval.tasks.triviaqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/triviaqa'], 'lm_eval.tasks.xnli': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/xnli'], 'lm_eval.tasks.code_x_glue': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/code_x_glue'], 'lm_eval.tasks.qa4mre': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/qa4mre'], 'lm_eval.tasks.ifeval': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/ifeval'], 'lm_eval.tasks.cmmlu': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/cmmlu'], 'lm_eval.tasks.medqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/medqa'], 'lm_eval.tasks.lambada_cloze': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/lambada_cloze'], 'lm_eval.tasks.translation': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/translation'], 'lm_eval.tasks.nq_open': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/nq_open'], 'lm_eval.tasks.hendrycks_ethics': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/hendrycks_ethics'], 'lm_eval.tasks.okapi': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/okapi'], 'lm_eval.tasks.crows_pairs': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/crows_pairs'], 'lm_eval.tasks.gpqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/gpqa'], 'lm_eval.tasks.asdiv': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/asdiv'], 'lm_eval.tasks.ceval': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/ceval'], 'lm_eval.tasks.eus_trivia': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/eus_trivia'], 'lm_eval.tasks.eq_bench': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/eq_bench'], 'lm_eval.tasks.polemo2': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/polemo2'], 'lm_eval.tasks.glue': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/glue'], 'lm_eval.tasks.csatqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/csatqa'], 'lm_eval.tasks.qasper': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/qasper'], 'lm_eval.tasks.eus_reading': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/eus_reading'], 'lm_eval.tasks.logiqa2': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/logiqa2'], 'lm_eval.tasks.super_glue': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/super_glue'], 'lm_eval.tasks.aclue': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/aclue'], 'lm_eval.tasks.piqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/piqa'], 'lm_eval.tasks.mc_taco': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mc_taco'], 'lm_eval.tasks.benchmarks': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/benchmarks'], 'lm_eval.tasks.truthfulqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/truthfulqa'], 'lm_eval.tasks.logiqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/logiqa'], 'lm_eval.tasks.mmlu': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mmlu'], 'lm_eval.tasks.coqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/coqa'], 'lm_eval.tasks.squadv2': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/squadv2'], 'lm_eval.tasks.belebele': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/belebele'], 'lm_eval.tasks.fld': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/fld'], 'lm_eval.tasks.winogrande': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/winogrande'], 'lm_eval.tasks.mutual': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mutual'], 'lm_eval.tasks.webqs': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/webqs'], 'lm_eval.tasks.unscramble': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/unscramble'], 'lm_eval.tasks.realtoxicityprompts': 
['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/realtoxicityprompts'], 'lm_eval.tasks.storycloze': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/storycloze'], 'lm_eval.tasks.anli': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/anli'], 'lm_eval.tasks.mathqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mathqa'], 'lm_eval.tasks.ammlu': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/ammlu'], 'lm_eval.tasks.pubmedqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/pubmedqa'], 'lm_eval.tasks.xcopa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/xcopa'], 'lm_eval.tasks.toxigen': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/toxigen'], 'lm_eval.tasks.kormedmcqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/kormedmcqa'], 'lm_eval.tasks.bigbench.generate_until': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/bigbench/generate_until'], 'lm_eval.tasks.bigbench.multiple_choice': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice'], 'lm_eval.tasks.mgsm.direct': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mgsm/direct'], 'lm_eval.tasks.mgsm.native_cot': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mgsm/native_cot'], 'lm_eval.tasks.mgsm.en_cot': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mgsm/en_cot'], 'lm_eval.tasks.tmmluplus.default': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/tmmluplus/default'], 'lm_eval.tasks.model_written_evals.persona': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/model_written_evals/persona'], 'lm_eval.tasks.model_written_evals.sycophancy': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/model_written_evals/sycophancy'], 'lm_eval.tasks.model_written_evals.advanced_ai_risk': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk'], 'lm_eval.tasks.model_written_evals.winogenerated': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/model_written_evals/winogenerated'], 'lm_eval.tasks.kmmlu.direct': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/kmmlu/direct'], 'lm_eval.tasks.kmmlu.direct_hard': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard'], 'lm_eval.tasks.kmmlu.cot_hard': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/kmmlu/cot_hard'], 'lm_eval.tasks.kmmlu.hard': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/kmmlu/hard'], 'lm_eval.tasks.bbh.cot_fewshot': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/bbh/cot_fewshot'], 'lm_eval.tasks.bbh.fewshot': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/bbh/fewshot'], 'lm_eval.tasks.bbh.cot_zeroshot': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/bbh/cot_zeroshot'], 'lm_eval.tasks.bbh.zeroshot': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/bbh/zeroshot'], 'lm_eval.tasks.code_x_glue.code-text': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/code_x_glue/code-text'], 'lm_eval.tasks.okapi.arc_multilingual': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/okapi/arc_multilingual'], 'lm_eval.tasks.okapi.mmlu_multilingual': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/okapi/mmlu_multilingual'], 'lm_eval.tasks.okapi.hellaswag_multilingual': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/okapi/hellaswag_multilingual'], 'lm_eval.tasks.okapi.truthfulqa_multilingual': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/okapi/truthfulqa_multilingual'], 'lm_eval.tasks.gpqa.generative': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/gpqa/generative'], 'lm_eval.tasks.gpqa.n_shot': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/gpqa/n_shot'], 'lm_eval.tasks.gpqa.cot_zeroshot': 
['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot'], 'lm_eval.tasks.gpqa.cot_n_shot': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/gpqa/cot_n_shot'], 'lm_eval.tasks.gpqa.zeroshot': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/gpqa/zeroshot'], 'lm_eval.tasks.glue.mrpc': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/glue/mrpc'], 'lm_eval.tasks.glue.qqp': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/glue/qqp'], 'lm_eval.tasks.glue.rte': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/glue/rte'], 'lm_eval.tasks.glue.sst2': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/glue/sst2'], 'lm_eval.tasks.glue.mnli': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/glue/mnli'], 'lm_eval.tasks.glue.qnli': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/glue/qnli'], 'lm_eval.tasks.glue.cola': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/glue/cola'], 'lm_eval.tasks.glue.wnli': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/glue/wnli'], 'lm_eval.tasks.super_glue.multirc': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/super_glue/multirc'], 'lm_eval.tasks.super_glue.wic': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/super_glue/wic'], 'lm_eval.tasks.super_glue.record': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/super_glue/record'], 'lm_eval.tasks.super_glue.rte': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/super_glue/rte'], 'lm_eval.tasks.super_glue.wsc': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/super_glue/wsc'], 'lm_eval.tasks.super_glue.cb': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/super_glue/cb'], 'lm_eval.tasks.super_glue.boolq': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/super_glue/boolq'], 'lm_eval.tasks.super_glue.copa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/super_glue/copa'], 'lm_eval.tasks.benchmarks.flan': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/benchmarks/flan'], 'lm_eval.tasks.benchmarks.multimedqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/benchmarks/multimedqa'], 'lm_eval.tasks.mmlu.flan_cot_zeroshot': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot'], 'lm_eval.tasks.mmlu.default': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mmlu/default'], 'lm_eval.tasks.mmlu.flan_cot_fewshot': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_fewshot'], 'lm_eval.tasks.mmlu.flan_n_shot': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot'], 'lm_eval.tasks.mmlu.flan_n_shot.loglikelihood': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood'], 'lm_eval.tasks.mmlu.flan_n_shot.generative': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot/generative']}
+ PATH_PLACEHOLDER = '__editable__.lm_eval-0.4.2.finder' + ".__path_hook__"
+
+
+ class _EditableFinder:  # MetaPathFinder
+     @classmethod
+     def find_spec(cls, fullname, path=None, target=None):
+         extra_path = []
+
+         # Top-level packages and modules (we know these exist in the FS)
+         if fullname in MAPPING:
+             pkg_path = MAPPING[fullname]
+             return cls._find_spec(fullname, Path(pkg_path))
+
+         # Handle immediate children modules (required for namespaces to work)
+         # To avoid problems with case sensitivity in the file system we delegate
+         # to the importlib.machinery implementation.
+         parent, _, child = fullname.rpartition(".")
+         if parent and parent in MAPPING:
+             return PathFinder.find_spec(fullname, path=[MAPPING[parent], *extra_path])
+
+         # Other levels of nesting should be handled automatically by importlib
+         # using the parent path.
+         return None
+
+     @classmethod
+     def _find_spec(cls, fullname, candidate_path):
+         init = candidate_path / "__init__.py"
+         candidates = (candidate_path.with_suffix(x) for x in module_suffixes())
+         for candidate in chain([init], candidates):
+             if candidate.exists():
+                 return spec_from_file_location(fullname, candidate)
+
+
+ class _EditableNamespaceFinder:  # PathEntryFinder
+     @classmethod
+     def _path_hook(cls, path):
+         if path == PATH_PLACEHOLDER:
+             return cls
+         raise ImportError
+
+     @classmethod
+     def _paths(cls, fullname):
+         # Ensure __path__ is not empty for the spec to be considered a namespace.
+         return NAMESPACES[fullname] or MAPPING.get(fullname) or [PATH_PLACEHOLDER]
+
+     @classmethod
+     def find_spec(cls, fullname, target=None):
+         if fullname in NAMESPACES:
+             spec = ModuleSpec(fullname, None, is_package=True)
+             spec.submodule_search_locations = cls._paths(fullname)
+             return spec
+         return None
+
+     @classmethod
+     def find_module(cls, fullname):
+         return None
+
+
+ def install():
+     if not any(finder == _EditableFinder for finder in sys.meta_path):
+         sys.meta_path.append(_EditableFinder)
+
+     if not NAMESPACES:
+         return
+
+     if not any(hook == _EditableNamespaceFinder._path_hook for hook in sys.path_hooks):
+         # PathEntryFinder is needed to create NamespaceSpec without private APIs
+         sys.path_hooks.append(_EditableNamespaceFinder._path_hook)
+     if PATH_PLACEHOLDER not in sys.path:
+         sys.path.append(PATH_PLACEHOLDER)  # Used just to trigger the path hook
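
This module is the import shim that a PEP 660 editable install (`pip install -e`) generates: `install()` is triggered from a `.pth` file at interpreter startup, after which `import lm_eval` resolves into the source checkout listed in `MAPPING` instead of `site-packages`. A toy re-creation of the meta-path lookup, with a hypothetical path standing in for the real checkout:

```python
import sys
from importlib.util import spec_from_file_location
from pathlib import Path

# Hypothetical mapping; the real one points at /home/sdp/llm_eval/lm-evaluation.
TOY_MAPPING = {"my_pkg": "/tmp/my-project/my_pkg"}

class ToyEditableFinder:
    @classmethod
    def find_spec(cls, fullname, path=None, target=None):
        if fullname in TOY_MAPPING:
            init = Path(TOY_MAPPING[fullname]) / "__init__.py"
            if init.exists():
                return spec_from_file_location(fullname, init)
        return None  # defer to the remaining finders on sys.meta_path

sys.meta_path.append(ToyEditableFinder)
# `import my_pkg` now loads /tmp/my-project/my_pkg/__init__.py if it exists.
```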
env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.17 kB).

env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc ADDED
Binary file (108 kB).

env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/big_modeling.cpython-310.pyc ADDED
Binary file (23 kB).

env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/checkpointing.cpython-310.pyc ADDED
Binary file (8.17 kB).

env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/data_loader.cpython-310.pyc ADDED
Binary file (33.4 kB).

env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/hooks.cpython-310.pyc ADDED
Binary file (22.8 kB).

env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/inference.cpython-310.pyc ADDED
Binary file (6.08 kB).

env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/launchers.cpython-310.pyc ADDED
Binary file (8.05 kB).

env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/local_sgd.cpython-310.pyc ADDED
Binary file (3.61 kB).

env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/logging.cpython-310.pyc ADDED
Binary file (4.43 kB).

env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/memory_utils.cpython-310.pyc ADDED
Binary file (428 Bytes).

env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/optimizer.cpython-310.pyc ADDED
Binary file (6.88 kB).

env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/scheduler.cpython-310.pyc ADDED
Binary file (3.34 kB).

env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/state.cpython-310.pyc ADDED
Binary file (39.5 kB).

env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/tracking.cpython-310.pyc ADDED
Binary file (37.3 kB).
 
env-llmeval/lib/python3.10/site-packages/accelerate/accelerator.py ADDED
The diff for this file is too large to render.
 
env-llmeval/lib/python3.10/site-packages/accelerate/big_modeling.py ADDED
@@ -0,0 +1,622 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import logging
+ import os
+ from contextlib import contextmanager
+ from functools import wraps
+ from typing import Dict, List, Optional, Union
+
+ import torch
+ import torch.nn as nn
+
+ from .hooks import (
+     AlignDevicesHook,
+     CpuOffload,
+     UserCpuOffloadHook,
+     add_hook_to_module,
+     attach_align_device_hook,
+     attach_align_device_hook_on_blocks,
+ )
+ from .utils import (
+     OffloadedWeightsLoader,
+     check_cuda_p2p_ib_support,
+     check_device_map,
+     extract_submodules_state_dict,
+     find_tied_parameters,
+     get_balanced_memory,
+     infer_auto_device_map,
+     is_mlu_available,
+     is_npu_available,
+     is_torch_version,
+     is_xpu_available,
+     load_checkpoint_in_model,
+     offload_state_dict,
+     parse_flag_from_env,
+     retie_parameters,
+ )
+ from .utils.other import recursive_getattr
+
+
+ logger = logging.getLogger(__name__)
+
+
+ @contextmanager
+ def init_empty_weights(include_buffers: bool = None):
+     """
+     A context manager under which models are initialized with all parameters on the meta device, therefore creating an
+     empty model. Useful when just initializing the model would blow the available RAM.
+
+     Args:
+         include_buffers (`bool`, *optional*):
+             Whether or not to also put all buffers on the meta device while initializing.
+
+     Example:
+
+     ```python
+     import torch.nn as nn
+     from accelerate import init_empty_weights
+
+     # Initialize a model with 100 billion parameters in no time and without using any RAM.
+     with init_empty_weights():
+         tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])
+     ```
+
+     <Tip warning={true}>
+
+     Any model created under this context manager has no weights. As such you can't do something like
+     `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].
+     Make sure to overwrite the default device_map param for [`load_checkpoint_and_dispatch`], otherwise dispatch is not
+     called.
+
+     </Tip>
+     """
+     if include_buffers is None:
+         include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)
+     with init_on_device(torch.device("meta"), include_buffers=include_buffers) as f:
+         yield f
+
+
+ @contextmanager
+ def init_on_device(device: torch.device, include_buffers: bool = None):
+     """
+     A context manager under which models are initialized with all parameters on the specified device.
+
+     Args:
+         device (`torch.device`):
+             Device to initialize all parameters on.
+         include_buffers (`bool`, *optional*):
+             Whether or not to also put all buffers on the meta device while initializing.
+
+     Example:
+
+     ```python
+     import torch.nn as nn
+     from accelerate import init_on_device
+
+     with init_on_device(device=torch.device("cuda")):
+         tst = nn.Linear(100, 100)  # on `cuda` device
+     ```
+     """
+     if include_buffers is None:
+         include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)
+
+     # TODO(shingjan): remove the torch version check once older versions are deprecated
+     if is_torch_version(">=", "2.0") and include_buffers:
+         with device:
+             yield
+         return
+
+     old_register_parameter = nn.Module.register_parameter
+     if include_buffers:
+         old_register_buffer = nn.Module.register_buffer
+
+     def register_empty_parameter(module, name, param):
+         old_register_parameter(module, name, param)
+         if param is not None:
+             param_cls = type(module._parameters[name])
+             kwargs = module._parameters[name].__dict__
+             kwargs["requires_grad"] = param.requires_grad
+             module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)
+
+     def register_empty_buffer(module, name, buffer, persistent=True):
+         old_register_buffer(module, name, buffer, persistent=persistent)
+         if buffer is not None:
+             module._buffers[name] = module._buffers[name].to(device)
+
+     # Patch tensor creation
+     if include_buffers:
+         tensor_constructors_to_patch = {
+             torch_function_name: getattr(torch, torch_function_name)
+             for torch_function_name in ["empty", "zeros", "ones", "full"]
+         }
+     else:
+         tensor_constructors_to_patch = {}
+
+     def patch_tensor_constructor(fn):
+         def wrapper(*args, **kwargs):
+             kwargs["device"] = device
+             return fn(*args, **kwargs)
+
+         return wrapper
+
+     try:
+         nn.Module.register_parameter = register_empty_parameter
+         if include_buffers:
+             nn.Module.register_buffer = register_empty_buffer
+         for torch_function_name in tensor_constructors_to_patch.keys():
+             setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name)))
+         yield
+     finally:
+         nn.Module.register_parameter = old_register_parameter
+         if include_buffers:
+             nn.Module.register_buffer = old_register_buffer
+         for torch_function_name, old_torch_function in tensor_constructors_to_patch.items():
+             setattr(torch, torch_function_name, old_torch_function)
+
+
+ def cpu_offload(
+     model: nn.Module,
+     execution_device: Optional[torch.device] = None,
+     offload_buffers: bool = False,
+     state_dict: Optional[Dict[str, torch.Tensor]] = None,
+     preload_module_classes: Optional[List[str]] = None,
+ ):
+     """
+     Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one
+     copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that
+     state dict and put on the execution device passed as they are needed, then offloaded again.
+
+     Args:
+         model (`torch.nn.Module`):
+             The model to offload.
+         execution_device (`torch.device`, *optional*):
+             The device on which the forward pass of the model will be executed (should be a GPU). Will default to the
+             model's first parameter device.
+         offload_buffers (`bool`, *optional*, defaults to `False`):
+             Whether or not to offload the buffers with the model parameters.
+         state_dict (`Dict[str, torch.Tensor]`, *optional*):
+             The state dict of the model that will be kept on CPU.
+         preload_module_classes (`List[str]`, *optional*):
+             A list of classes whose instances should load all their weights (even in the submodules) at the beginning
+             of the forward. This should only be used for classes that have submodules which are registered but not
+             called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
+             `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
+     """
+     if execution_device is None:
+         execution_device = next(iter(model.parameters())).device
+     if state_dict is None:
+         state_dict = {n: p.to("cpu") for n, p in model.state_dict().items()}
+
+     add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)
+     attach_align_device_hook(
+         model,
+         execution_device=execution_device,
+         offload=True,
+         offload_buffers=offload_buffers,
+         weights_map=state_dict,
+         preload_module_classes=preload_module_classes,
+     )
+
+     return model
+
+
+ def cpu_offload_with_hook(
+     model: torch.nn.Module,
+     execution_device: Optional[Union[int, str, torch.device]] = None,
+     prev_module_hook: Optional[UserCpuOffloadHook] = None,
+ ):
+     """
+     Offloads a model on the CPU and puts it back to an execution device when executed. The difference with
+     [`cpu_offload`] is that the model stays on the execution device after the forward and is only offloaded again when
+     the `offload` method of the returned `hook` is called. Useful for pipelines running a model in a loop.
+
+     Args:
+         model (`torch.nn.Module`):
+             The model to offload.
+         execution_device (`str`, `int` or `torch.device`, *optional*):
+             The device on which the model should be executed. Will default to the MPS device if it's available, then
+             GPU 0 if there is a GPU, and finally to the CPU.
+         prev_module_hook (`UserCpuOffloadHook`, *optional*):
+             The hook sent back by this function for a previous model in the pipeline you are running. If passed, its
+             offload method will be called just before the forward of the model to which this hook is attached.
+
+     Example:
+
+     ```py
+     model_1, hook_1 = cpu_offload_with_hook(model_1, cuda_device)
+     model_2, hook_2 = cpu_offload_with_hook(model_2, cuda_device, prev_module_hook=hook_1)
+     model_3, hook_3 = cpu_offload_with_hook(model_3, cuda_device, prev_module_hook=hook_2)
+
+     hid_1 = model_1(input)
+     for i in range(50):
+         # model1 is offloaded on the CPU at the first iteration, model 2 stays on the GPU for this whole loop.
+         hid_2 = model_2(hid_1)
+     # model2 is offloaded to the CPU just before this forward.
+     hid_3 = model_3(hid_2)
+
+     # For model3, you need to manually call the hook offload method.
+     hook_3.offload()
+     ```
+     """
+     hook = CpuOffload(execution_device=execution_device, prev_module_hook=prev_module_hook)
+     add_hook_to_module(model, hook, append=True)
+     user_hook = UserCpuOffloadHook(model, hook)
+     return model, user_hook
+
+
+ def disk_offload(
+     model: nn.Module,
+     offload_dir: Union[str, os.PathLike],
+     execution_device: Optional[torch.device] = None,
+     offload_buffers: bool = False,
+     preload_module_classes: Optional[List[str]] = None,
+ ):
+     """
+     Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as
+     memory-mapped array in a given folder. During the forward pass, parameters will be accessed from that folder and
+     put on the execution device passed as they are needed, then offloaded again.
+
+     Args:
+         model (`torch.nn.Module`): The model to offload.
+         offload_dir (`str` or `os.PathLike`):
+             The folder in which to offload the model weights (or where the model weights are already offloaded).
+         execution_device (`torch.device`, *optional*):
+             The device on which the forward pass of the model will be executed (should be a GPU). Will default to the
+             model's first parameter device.
+         offload_buffers (`bool`, *optional*, defaults to `False`):
+             Whether or not to offload the buffers with the model parameters.
+         preload_module_classes (`List[str]`, *optional*):
+             A list of classes whose instances should load all their weights (even in the submodules) at the beginning
+             of the forward. This should only be used for classes that have submodules which are registered but not
+             called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
+             `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
+     """
+     if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json")):
+         offload_state_dict(offload_dir, model.state_dict())
+     if execution_device is None:
+         execution_device = next(iter(model.parameters())).device
+     weights_map = OffloadedWeightsLoader(save_folder=offload_dir)
+
+     add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)
+     attach_align_device_hook(
+         model,
+         execution_device=execution_device,
+         offload=True,
+         offload_buffers=offload_buffers,
+         weights_map=weights_map,
+         preload_module_classes=preload_module_classes,
+     )
+
+     return model
+
+
+ def dispatch_model(
+     model: nn.Module,
+     device_map: Dict[str, Union[str, int, torch.device]],
+     main_device: Optional[torch.device] = None,
+     state_dict: Optional[Dict[str, torch.Tensor]] = None,
+     offload_dir: Optional[Union[str, os.PathLike]] = None,
+     offload_index: Optional[Dict[str, str]] = None,
+     offload_buffers: bool = False,
+     skip_keys: Optional[Union[str, List[str]]] = None,
+     preload_module_classes: Optional[List[str]] = None,
+     force_hooks: bool = False,
+ ):
+     """
+     Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on
+     the CPU or even the disk.
+
+     Args:
+         model (`torch.nn.Module`):
+             The model to dispatch.
+         device_map (`Dict[str, Union[str, int, torch.device]]`):
+             A dictionary mapping module names in the model's `state_dict` to the device they should go to. Note that
+             `"disk"` is accepted even if it's not a proper value for `torch.device`.
+         main_device (`str`, `int` or `torch.device`, *optional*):
+             The main execution device. Will default to the first device in the `device_map` different from `"cpu"` or
+             `"disk"`.
+         state_dict (`Dict[str, torch.Tensor]`, *optional*):
+             The state dict of the part of the model that will be kept on CPU.
+         offload_dir (`str` or `os.PathLike`):
+             The folder in which to offload the model weights (or where the model weights are already offloaded).
+         offload_index (`Dict`, *optional*):
+             A dictionary from weight name to their information (`dtype`/`shape` or safetensors filename). Will default
+             to the index saved in `save_folder`.
+         offload_buffers (`bool`, *optional*, defaults to `False`):
+             Whether or not to offload the buffers with the model parameters.
+         skip_keys (`str` or `List[str]`, *optional*):
+             A list of keys to ignore when moving inputs or outputs between devices.
+         preload_module_classes (`List[str]`, *optional*):
+             A list of classes whose instances should load all their weights (even in the submodules) at the beginning
+             of the forward. This should only be used for classes that have submodules which are registered but not
+             called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
+             `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
+         force_hooks (`bool`, *optional*, defaults to `False`):
+             Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
+             single device.
+     """
+     # Error early if the device map is incomplete.
+     check_device_map(model, device_map)
+
+     # for backward compatibility
+     is_bnb_quantized = (
+         getattr(model, "is_quantized", False) or getattr(model, "is_loaded_in_8bit", False)
+     ) and getattr(model, "quantization_method", "bitsandbytes") == "bitsandbytes"
+
+     # We attach hooks if the device_map has at least 2 different devices or if
+     # force_hooks is set to `True`. Otherwise, the model is already loaded
+     # on the unique device and the user can decide where to dispatch the model.
+     # If the model is quantized, we always force-dispatch the model.
+     if (len(set(device_map.values())) > 1) or is_bnb_quantized or force_hooks:
+         if main_device is None:
+             if set(device_map.values()) == {"cpu"} or set(device_map.values()) == {"cpu", "disk"}:
+                 main_device = "cpu"
+             else:
+                 main_device = [d for d in device_map.values() if d not in ["cpu", "disk"]][0]
+
+         if main_device != "cpu":
+             cpu_modules = [name for name, device in device_map.items() if device == "cpu"]
+             if state_dict is None and len(cpu_modules) > 0:
+                 state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules)
+
+         disk_modules = [name for name, device in device_map.items() if device == "disk"]
+         if offload_dir is None and offload_index is None and len(disk_modules) > 0:
+             raise ValueError(
+                 "We need an `offload_dir` to dispatch this model according to this `device_map`, the following submodules "
+                 f"need to be offloaded: {', '.join(disk_modules)}."
+             )
+         if (
+             len(disk_modules) > 0
+             and offload_index is None
+             and (not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json")))
+         ):
+             disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules)
+             offload_state_dict(offload_dir, disk_state_dict)
+
+         execution_device = {
+             name: main_device if device in ["cpu", "disk"] else device for name, device in device_map.items()
+         }
+         execution_device[""] = main_device
+         offloaded_devices = ["disk"] if main_device == "cpu" or main_device == "mps" else ["cpu", "disk"]
+         offload = {name: device in offloaded_devices for name, device in device_map.items()}
+         save_folder = offload_dir if len(disk_modules) > 0 else None
+         if state_dict is not None or save_folder is not None or offload_index is not None:
+             device = main_device if offload_index is not None else None
+             weights_map = OffloadedWeightsLoader(
+                 state_dict=state_dict, save_folder=save_folder, index=offload_index, device=device
+             )
+         else:
+             weights_map = None
+
+         # When dispatching the model's parameters to the devices specified in device_map, we want to avoid allocating memory several times for the
+         # tied parameters. The dictionary tied_params_map keeps track of the already allocated data for a given tied parameter (represented by its
+         # original pointer) on each device.
+         tied_params = find_tied_parameters(model)
+
+         tied_params_map = {}
+         for group in tied_params:
+             for param_name in group:
+                 # data_ptr() is enough here, as `find_tied_parameters` finds tied params simply by comparing `param1 is param2`, so we don't need
+                 # to care about views of tensors through storage_offset.
+                 data_ptr = recursive_getattr(model, param_name).data_ptr()
+                 tied_params_map[data_ptr] = {}
+
+         # Note: To handle the disk offloading case, we can not simply use weights_map[param_name].data_ptr() as the reference pointer,
+         # as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer.
+
+         attach_align_device_hook_on_blocks(
+             model,
+             execution_device=execution_device,
+             offload=offload,
+             offload_buffers=offload_buffers,
+             weights_map=weights_map,
+             skip_keys=skip_keys,
+             preload_module_classes=preload_module_classes,
+             tied_params_map=tied_params_map,
+         )
+
+         # warn if there are any params on the meta device
+         offloaded_devices_str = " and ".join(
+             [device for device in set(device_map.values()) if device in ("cpu", "disk")]
+         )
+         if len(offloaded_devices_str) > 0:
+             logger.warning(
+                 f"Some parameters are on the meta device because they were offloaded to the {offloaded_devices_str}."
+             )
+
+         # Attaching the hook may break tied weights, so we retie them
+         retie_parameters(model, tied_params)
+
+         # add warning to cuda and to method
+         def add_warning(fn, model):
+             @wraps(fn)
+             def wrapper(*args, **kwargs):
+                 warning_msg = "You shouldn't move a model that is dispatched using accelerate hooks."
+                 if str(fn.__name__) == "to":
+                     to_device = torch._C._nn._parse_to(*args, **kwargs)[0]
+                     if to_device is not None:
+                         logger.warning(warning_msg)
+                 else:
+                     logger.warning(warning_msg)
+                 for param in model.parameters():
+                     if param.device == torch.device("meta"):
+                         raise RuntimeError("You can't move a model that has some modules offloaded to cpu or disk.")
+                 return fn(*args, **kwargs)
+
+             return wrapper
+
+         model.to = add_warning(model.to, model)
+         if is_npu_available():
+             model.npu = add_warning(model.npu, model)
+         elif is_mlu_available():
+             model.mlu = add_warning(model.mlu, model)
+         elif is_xpu_available():
+             model.xpu = add_warning(model.xpu, model)
+         else:
+             model.cuda = add_warning(model.cuda, model)
+
+         # Check if we are using multi-gpus with RTX 4000 series
+         use_multi_gpu = len([device for device in set(device_map.values()) if device not in ("cpu", "disk")]) > 1
+         if use_multi_gpu and not check_cuda_p2p_ib_support():
+             logger.warning(
+                 "We've detected an older driver with an RTX 4000 series GPU. These drivers have issues with P2P. "
+                 "This can affect the multi-gpu inference when using accelerate device_map. "
+                 "Please make sure to update your driver to the latest version which resolves this."
+             )
+     else:
+         device = list(device_map.values())[0]
+         # `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)).
+         if is_npu_available() and isinstance(device, int):
+             device = f"npu:{device}"
+         elif is_mlu_available() and isinstance(device, int):
+             device = f"mlu:{device}"
+         elif is_xpu_available() and isinstance(device, int):
+             device = f"xpu:{device}"
+         if device != "disk":
+             model.to(device)
+         else:
+             raise ValueError(
+                 "You are trying to offload the whole model to the disk. Please use the `disk_offload` function instead."
+             )
+     # Convert OrderedDict back to dict for easier usage
+     model.hf_device_map = dict(device_map)
+     return model
+
+
+ def load_checkpoint_and_dispatch(
+     model: nn.Module,
+     checkpoint: Union[str, os.PathLike],
+     device_map: Optional[Union[str, Dict[str, Union[int, str, torch.device]]]] = None,
+     max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
+     no_split_module_classes: Optional[List[str]] = None,
+     offload_folder: Optional[Union[str, os.PathLike]] = None,
+     offload_buffers: bool = False,
+     dtype: Optional[Union[str, torch.dtype]] = None,
+     offload_state_dict: Optional[bool] = None,
+     skip_keys: Optional[Union[str, List[str]]] = None,
+     preload_module_classes: Optional[List[str]] = None,
+     force_hooks: bool = False,
+ ):
+     """
+     Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are
+     loaded and adds the various hooks that will make this model run properly (even if split across devices).
+
+     Args:
+         model (`torch.nn.Module`): The model in which we want to load a checkpoint.
+         checkpoint (`str` or `os.PathLike`):
+             The folder checkpoint to load. It can be:
+             - a path to a file containing a whole model state dict
+             - a path to a `.json` file containing the index to a sharded checkpoint
+             - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint.
+         device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):
+             A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer
+             name; once a given module name is inside, every submodule of it will be sent to the same device.
+
+             To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more
+             information about each option see [here](../concept_guides/big_model_inference#designing-a-device-map).
+             Defaults to None, which means [`dispatch_model`] will not be called.
+         max_memory (`Dict`, *optional*):
+             A dictionary mapping device identifiers to maximum memory. Will default to the maximum memory available
+             for each GPU and the available CPU RAM if unset.
+         no_split_module_classes (`List[str]`, *optional*):
+             A list of layer class names that should never be split across devices (for instance any layer that has a
+             residual connection).
+         offload_folder (`str` or `os.PathLike`, *optional*):
+             If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
+         offload_buffers (`bool`, *optional*, defaults to `False`):
+             In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as
+             well as the parameters.
+         dtype (`str` or `torch.dtype`, *optional*):
+             If provided, the weights will be converted to that type when loaded.
+         offload_state_dict (`bool`, *optional*):
+             If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if
+             the weight of the CPU state dict + the biggest shard does not fit. Will default to `True` if the device map
+             picked contains `"disk"` values.
+         skip_keys (`str` or `List[str]`, *optional*):
+             A list of keys to ignore when moving inputs or outputs between devices.
+         preload_module_classes (`List[str]`, *optional*):
+             A list of classes whose instances should load all their weights (even in the submodules) at the beginning
+             of the forward. This should only be used for classes that have submodules which are registered but not
+             called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
+             `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
+         force_hooks (`bool`, *optional*, defaults to `False`):
+             Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
+             single device.
+
+     Example:
+
+     ```python
+     >>> from accelerate import init_empty_weights, load_checkpoint_and_dispatch
+     >>> from huggingface_hub import hf_hub_download
+     >>> from transformers import AutoConfig, AutoModelForCausalLM
+
+     >>> # Download the Weights
+     >>> checkpoint = "EleutherAI/gpt-j-6B"
+     >>> weights_location = hf_hub_download(checkpoint, "pytorch_model.bin")
+
+     >>> # Create a model and initialize it with empty weights
+     >>> config = AutoConfig.from_pretrained(checkpoint)
+     >>> with init_empty_weights():
+     ...     model = AutoModelForCausalLM.from_config(config)
+
+     >>> # Load the checkpoint and dispatch it to the right devices
+     >>> model = load_checkpoint_and_dispatch(
+     ...     model, weights_location, device_map="auto", no_split_module_classes=["GPTJBlock"]
+     ... )
+     ```
+     """
+     if isinstance(device_map, str) and device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
+         raise ValueError(
+             "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
+             "'sequential'."
+         )
+     if isinstance(device_map, str):
+         if device_map != "sequential":
+             max_memory = get_balanced_memory(
+                 model,
+                 max_memory=max_memory,
+                 no_split_module_classes=no_split_module_classes,
+                 dtype=dtype,
+                 low_zero=(device_map == "balanced_low_0"),
+             )
+         device_map = infer_auto_device_map(
+             model,
+             max_memory=max_memory,
+             no_split_module_classes=no_split_module_classes,
+             dtype=dtype,
+             offload_buffers=offload_buffers,
+         )
+     if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
+         offload_state_dict = True
+     load_checkpoint_in_model(
+         model,
+         checkpoint,
+         device_map=device_map,
+         offload_folder=offload_folder,
+         dtype=dtype,
+         offload_state_dict=offload_state_dict,
+         offload_buffers=offload_buffers,
+     )
+     if device_map is None:
+         return model
+     return dispatch_model(
+         model,
+         device_map=device_map,
+         offload_dir=offload_folder,
+         offload_buffers=offload_buffers,
+         skip_keys=skip_keys,
+         preload_module_classes=preload_module_classes,
+         force_hooks=force_hooks,
+     )
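
Together these entry points implement Accelerate's big-model-inference flow: materialize the skeleton with `init_empty_weights`, fill it with `load_checkpoint_in_model`, then wire up hooks with `dispatch_model`. A hedged usage sketch with an explicit `device_map` on a toy module (assumes one CUDA device; the model and map are illustrative, not from this commit):

```python
import torch.nn as nn
from accelerate import dispatch_model

model = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 8))

# Keys are module names from the model's state_dict; values are devices.
# The "cpu" entry is offloaded: its weights move to the GPU only per forward.
device_map = {"0": 0, "1": 0, "2": "cpu"}
model = dispatch_model(model, device_map=device_map)
print(model.hf_device_map)  # {'0': 0, '1': 0, '2': 'cpu'}
```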
env-llmeval/lib/python3.10/site-packages/accelerate/commands/__init__.py ADDED
@@ -0,0 +1,13 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
env-llmeval/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ #!/usr/bin/env python
+
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from accelerate.commands.config import get_config_parser
+ from accelerate.commands.env import env_command_parser
+ from accelerate.commands.estimate import estimate_command_parser
+ from accelerate.commands.launch import launch_command_parser
+ from accelerate.commands.test import test_command_parser
+ from accelerate.commands.tpu import tpu_command_parser
+ from accelerate.commands.utils import CustomArgumentParser
+
+
+ def main():
+     parser = CustomArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
+     subparsers = parser.add_subparsers(help="accelerate command helpers")
+
+     # Register commands
+     get_config_parser(subparsers=subparsers)
+     estimate_command_parser(subparsers=subparsers)
+     env_command_parser(subparsers=subparsers)
+     launch_command_parser(subparsers=subparsers)
+     tpu_command_parser(subparsers=subparsers)
+     test_command_parser(subparsers=subparsers)
+
+     # Let's go
+     args = parser.parse_args()
+
+     if not hasattr(args, "func"):
+         parser.print_help()
+         exit(1)
+
+     # Run
+     args.func(args)
+
+
+ if __name__ == "__main__":
+     main()
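Because `main()` simply dispatches on `args.func`, the CLI can also be driven programmatically; a small sketch (overriding `sys.argv` is purely illustrative):

    import sys
    from accelerate.commands.accelerate_cli import main

    sys.argv = ["accelerate", "env"]  # equivalent to running `accelerate env` in a shell
    main()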
env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/__init__.py ADDED
@@ -0,0 +1,52 @@
+ #!/usr/bin/env python
+
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import argparse
+
+ from .config import config_command_parser
+ from .config_args import default_config_file, load_config_from_file  # noqa: F401
+ from .default import default_command_parser
+ from .update import update_command_parser
+
+
+ def get_config_parser(subparsers=None):
+     parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
+     # The main config parser
+     config_parser = config_command_parser(subparsers)
+     # The subparser to add commands to
+     subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")
+
+     # Then add other parsers with the parent parser
+     default_command_parser(subcommands, parents=[parent_parser])
+     update_command_parser(subcommands, parents=[parent_parser])
+
+     return config_parser
+
+
+ def main():
+     config_parser = get_config_parser()
+     args = config_parser.parse_args()
+
+     if not hasattr(args, "func"):
+         config_parser.print_help()
+         exit(1)
+
+     # Run
+     args.func(args)
+
+
+ if __name__ == "__main__":
+     main()
env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/cluster.py ADDED
@@ -0,0 +1,705 @@
+ #!/usr/bin/env python
+
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import os
+
+ from ...utils import (
+     ComputeEnvironment,
+     DistributedType,
+     is_deepspeed_available,
+     is_mlu_available,
+     is_mps_available,
+     is_npu_available,
+     is_transformers_available,
+     is_xpu_available,
+ )
+ from ...utils.constants import (
+     DEEPSPEED_MULTINODE_LAUNCHERS,
+     FSDP_AUTO_WRAP_POLICY,
+     FSDP_BACKWARD_PREFETCH,
+     FSDP_SHARDING_STRATEGY,
+     FSDP_STATE_DICT_TYPE,
+     TORCH_DYNAMO_MODES,
+ )
+ from .config_args import ClusterConfig
+ from .config_utils import (
+     DYNAMO_BACKENDS,
+     _ask_field,
+     _ask_options,
+     _convert_distributed_mode,
+     _convert_dynamo_backend,
+     _convert_mixed_precision,
+     _convert_yes_no_to_bool,
+ )
+
+
+ def get_cluster_input():
+     distributed_type = _ask_options(
+         "Which type of machine are you using?",
+         ["No distributed training", "multi-CPU", "multi-XPU", "multi-GPU", "multi-NPU", "multi-MLU", "TPU"],
+         _convert_distributed_mode,
+     )
+
+     machine_rank = 0
+     num_machines = 1
+     num_processes = 1
+     gpu_ids = None
+     main_process_ip = None
+     main_process_port = None
+     rdzv_backend = "static"
+     same_network = True
+     debug = False
+
+     if distributed_type in [
+         DistributedType.MULTI_GPU,
+         DistributedType.MULTI_MLU,
+         DistributedType.MULTI_NPU,
+         DistributedType.MULTI_XPU,
+         DistributedType.MULTI_CPU,
+     ]:
+         num_machines = _ask_field(
+             "How many different machines will you use (use more than 1 for multi-node training)? [1]: ",
+             int,
+             default=1,
+         )
+         if num_machines > 1:
+             machine_rank = _ask_options(
+                 "What is the rank of this machine?",
+                 list(range(num_machines)),
+                 int,
+             )
+             main_process_ip = _ask_field(
+                 "What is the IP address of the machine that will host the main process? ",
+             )
+             main_process_port = _ask_field(
+                 "What is the port you will use to communicate with the main process? ",
+                 int,
+             )
+             same_network = _ask_field(
+                 "Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: ",
+                 _convert_yes_no_to_bool,
+                 default=True,
+                 error_message="Please enter yes or no.",
+             )
+             if not same_network:
+                 rdzv_backend = _ask_field(
+                     "What rendezvous backend will you use? ('static', 'c10d', ...): ", default="static"
+                 )
+         debug = _ask_field(
+             "Should distributed operations be checked while running for errors? This can avoid timeout issues but will be slower. [yes/NO]: ",
+             _convert_yes_no_to_bool,
+             default=False,
+             error_message="Please enter yes or no.",
+         )
+
+     if distributed_type == DistributedType.NO:
+         use_cpu = _ask_field(
+             "Do you want to run your training on CPU only (even if a GPU / Apple Silicon / Ascend NPU device is available)? [yes/NO]:",
+             _convert_yes_no_to_bool,
+             default=False,
+             error_message="Please enter yes or no.",
+         )
+     elif distributed_type == DistributedType.MULTI_CPU:
+         use_cpu = True
+     else:
+         use_cpu = False
+
+     ipex_config = {}
+     mpirun_config = {}
+     if use_cpu:
+         ipex_config["ipex"] = _ask_field(
+             "Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU? [yes/NO]:",
+             _convert_yes_no_to_bool,
+             default=False,
+             error_message="Please enter yes or no.",
+         )
+         if distributed_type == DistributedType.MULTI_CPU:
+             use_mpirun = _ask_field(
+                 "Do you want accelerate to launch mpirun? [yes/NO]: ",
+                 _convert_yes_no_to_bool,
+                 default=False,
+                 error_message="Please enter yes or no.",
+             )
+             if use_mpirun:
+                 mpirun_hostfile = _ask_field(
+                     "Please enter the path to the hostfile to use with mpirun [~/hostfile]: ",
+                     str,
+                     default="~/hostfile",
+                 )
+                 mpirun_config["mpirun_hostfile"] = os.path.expanduser(mpirun_hostfile.strip())
+                 mpirun_config["mpirun_ccl"] = _ask_field("Enter the number of oneCCL worker threads [1]: ", default=1)
+     if (
+         not use_cpu
+         and is_xpu_available()
+         and distributed_type
+         not in [DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU, DistributedType.XLA]
+     ):
+         ipex_config["use_xpu"] = _ask_field(
+             "Do you want to use XPU plugin to speed up training on XPU? [yes/NO]:",
+             _convert_yes_no_to_bool,
+             default=False,
+             error_message="Please enter yes or no.",
+         )
+
+     dynamo_config = {}
+     use_dynamo = _ask_field(
+         "Do you wish to optimize your script with torch dynamo? [yes/NO]:",
+         _convert_yes_no_to_bool,
+         default=False,
+         error_message="Please enter yes or no.",
+     )
+     if use_dynamo:
+         prefix = "dynamo_"
+         dynamo_config[prefix + "backend"] = _ask_options(
+             "Which dynamo backend would you like to use?",
+             [x.lower() for x in DYNAMO_BACKENDS],
+             _convert_dynamo_backend,
+             default=2,
+         )
+         use_custom_options = _ask_field(
+             "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
+             _convert_yes_no_to_bool,
+             default=False,
+             error_message="Please enter yes or no.",
+         )
+
+         if use_custom_options:
+             dynamo_config[prefix + "mode"] = _ask_options(
+                 "Which mode do you want to use?",
+                 TORCH_DYNAMO_MODES,
+                 lambda x: TORCH_DYNAMO_MODES[int(x)],
+                 default=0,
+             )
+             dynamo_config[prefix + "use_fullgraph"] = _ask_field(
+                 "Do you want fullgraph mode, or is it OK to break the model into several subgraphs? [yes/NO]: ",
+                 _convert_yes_no_to_bool,
+                 default=False,
+                 error_message="Please enter yes or no.",
+             )
+             dynamo_config[prefix + "use_dynamic"] = _ask_field(
+                 "Do you want to enable dynamic shape tracing? [yes/NO]: ",
+                 _convert_yes_no_to_bool,
+                 default=False,
+                 error_message="Please enter yes or no.",
+             )
+
+     use_mps = not use_cpu and is_mps_available()
+     deepspeed_config = {}
+     if (
+         distributed_type
+         in [
+             DistributedType.MULTI_GPU,
+             DistributedType.MULTI_XPU,
+             DistributedType.MULTI_NPU,
+             DistributedType.MULTI_MLU,
+             DistributedType.NO,
+         ]
+         and not use_mps
+     ):
+         use_deepspeed = _ask_field(
+             "Do you want to use DeepSpeed? [yes/NO]: ",
+             _convert_yes_no_to_bool,
+             default=False,
+             error_message="Please enter yes or no.",
+         )
+         if use_deepspeed:
+             distributed_type = DistributedType.DEEPSPEED
+             assert (
+                 is_deepspeed_available()
+             ), "DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source"
+
+     if distributed_type == DistributedType.DEEPSPEED:
+         use_deepspeed_config = _ask_field(
+             "Do you want to specify a json file to a DeepSpeed config? [yes/NO]: ",
+             _convert_yes_no_to_bool,
+             default=False,
+             error_message="Please enter yes or no.",
+         )
+         if use_deepspeed_config:
+             deepspeed_config["deepspeed_config_file"] = _ask_field(
+                 "Please enter the path to the json DeepSpeed config file: ",
+                 str,
+                 default="none",
+             )
+         else:
+             deepspeed_config["zero_stage"] = _ask_options(
+                 "What should be your DeepSpeed's ZeRO optimization stage?",
+                 [0, 1, 2, 3],
+                 int,
+                 default=2,
+             )
+
+             deepspeed_devices = ["none", "cpu", "nvme"]
+             if deepspeed_config["zero_stage"] >= 2:
+                 deepspeed_config["offload_optimizer_device"] = _ask_options(
+                     "Where to offload optimizer states?", deepspeed_devices, lambda x: deepspeed_devices[int(x)]
+                 )
+                 deepspeed_config["offload_param_device"] = _ask_options(
+                     "Where to offload parameters?", deepspeed_devices, lambda x: deepspeed_devices[int(x)]
+                 )
+                 if deepspeed_config["offload_param_device"] == "nvme":
+                     deepspeed_config["offload_param_nvme_path"] = _ask_field(
+                         "NVMe path to offload parameters?",
+                         str,
+                         default="/nvme",
+                     )
+                 if deepspeed_config["offload_optimizer_device"] == "nvme":
+                     deepspeed_config["offload_optimizer_nvme_path"] = _ask_field(
+                         "NVMe path to offload optimizer states?",
+                         str,
+                         default="/nvme",
+                     )
+             deepspeed_config["gradient_accumulation_steps"] = _ask_field(
+                 "How many gradient accumulation steps are you passing in your script? [1]: ",
+                 int,
+                 default=1,
+             )
+             use_gradient_clipping = _ask_field(
+                 "Do you want to use gradient clipping? [yes/NO]: ",
+                 _convert_yes_no_to_bool,
+                 default=False,
+                 error_message="Please enter yes or no.",
+             )
+             if use_gradient_clipping:
+                 deepspeed_config["gradient_clipping"] = _ask_field(
+                     "What is the gradient clipping value? [1.0]: ",
+                     float,
+                     default=1.0,
+                 )
+             if deepspeed_config["zero_stage"] == 3:
+                 deepspeed_config["zero3_save_16bit_model"] = _ask_field(
+                     "Do you want to save 16-bit model weights when using ZeRO Stage-3? [yes/NO]: ",
+                     _convert_yes_no_to_bool,
+                     default=False,
+                     error_message="Please enter yes or no.",
+                 )
+             deepspeed_config["zero3_init_flag"] = _ask_field(
+                 "Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: ",
+                 _convert_yes_no_to_bool,
+                 default=False,
+                 error_message="Please enter yes or no.",
+             )
+             if deepspeed_config["zero3_init_flag"]:
+                 if not is_transformers_available():
+                     raise Exception(
+                         "When `zero3_init_flag` is set, it requires Transformers to be installed. "
+                         "Please run `pip3 install transformers`."
+                     )
+
+         if num_machines > 1:
+             launcher_query = "Which type of launcher do you want to use?"
+             deepspeed_config["deepspeed_multinode_launcher"] = _ask_options(
+                 launcher_query,
+                 DEEPSPEED_MULTINODE_LAUNCHERS,
+                 lambda x: DEEPSPEED_MULTINODE_LAUNCHERS[int(x)],
+             )
+
+             if deepspeed_config["deepspeed_multinode_launcher"] != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
+                 deepspeed_config["deepspeed_hostfile"] = _ask_field(
+                     "DeepSpeed configures multi-node compute resources with hostfile. "
+                     "Each row is of the format `hostname slots=[num_gpus]`, e.g., `localhost slots=2`; "
+                     "for more information please refer to the official [documentation]"
+                     "(https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node). "
+                     "Please specify the location of hostfile: ",
+                     str,
+                 )
+
+                 is_exclusion_filter = _ask_field(
+                     "Do you want to specify exclusion filter string? [yes/NO]: ",
+                     _convert_yes_no_to_bool,
+                     default=False,
+                     error_message="Please enter yes or no.",
+                 )
+                 if is_exclusion_filter:
+                     deepspeed_config["deepspeed_exclusion_filter"] = _ask_field(
+                         "DeepSpeed exclusion filter string: ",
+                         str,
+                     )
+
+                 is_inclusion_filter = _ask_field(
+                     "Do you want to specify inclusion filter string? [yes/NO]: ",
+                     _convert_yes_no_to_bool,
+                     default=False,
+                     error_message="Please enter yes or no.",
+                 )
+                 if is_inclusion_filter:
+                     deepspeed_config["deepspeed_inclusion_filter"] = _ask_field(
+                         "DeepSpeed inclusion filter string: ",
+                         str,
+                     )
+
+     fsdp_config = {}
+     if distributed_type in [
+         DistributedType.MULTI_GPU,
+         DistributedType.MULTI_NPU,
+         DistributedType.MULTI_MLU,
+         DistributedType.MULTI_XPU,
+     ]:
+         use_fsdp = _ask_field(
+             "Do you want to use FullyShardedDataParallel? [yes/NO]: ",
+             _convert_yes_no_to_bool,
+             default=False,
+             error_message="Please enter yes or no.",
+         )
+         if use_fsdp:
+             distributed_type = DistributedType.FSDP
+     if distributed_type == DistributedType.FSDP:
+         sharding_strategy_query = "What should be your sharding strategy?"
+         fsdp_config["fsdp_sharding_strategy"] = _ask_options(
+             sharding_strategy_query,
+             FSDP_SHARDING_STRATEGY,
+             lambda x: FSDP_SHARDING_STRATEGY[int(x)],
+         )
+         fsdp_config["fsdp_offload_params"] = _ask_field(
+             "Do you want to offload parameters and gradients to CPU? [yes/NO]: ",
+             _convert_yes_no_to_bool,
+             default=False,
+             error_message="Please enter yes or no.",
+         )
+         fsdp_wrap_query = "What should be your auto wrap policy?"
+         fsdp_config["fsdp_auto_wrap_policy"] = _ask_options(
+             fsdp_wrap_query,
+             FSDP_AUTO_WRAP_POLICY,
+             lambda x: FSDP_AUTO_WRAP_POLICY[int(x)],
+         )
+         if fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[0]:
+             use_no_split_modules = _ask_field(
+                 "Do you want to use the model's `_no_split_modules` to wrap? Only applicable for 🤗 Transformers [yes/NO]: ",
+                 _convert_yes_no_to_bool,
+                 default=False,
+                 error_message="Please enter yes or no.",
+             )
+             if not use_no_split_modules:
+                 fsdp_config["fsdp_transformer_layer_cls_to_wrap"] = _ask_field(
+                     "Specify the comma-separated list of transformer layer class names (case-sensitive) to wrap, e.g.: "
+                     "`BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput` ...? : ",
+                     str,
+                 )
+         elif fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[1]:
+             fsdp_config["fsdp_min_num_params"] = _ask_field(
+                 "What should be your FSDP's minimum number of parameters for Default Auto Wrapping Policy? [1e8]: ",
+                 int,
+                 default=100000000,
+             )
+         fsdp_backward_prefetch_query = "What should be your FSDP's backward prefetch policy?"
+         fsdp_config["fsdp_backward_prefetch"] = _ask_options(
+             fsdp_backward_prefetch_query,
+             FSDP_BACKWARD_PREFETCH,
+             lambda x: FSDP_BACKWARD_PREFETCH[int(x)],
+         )
+         fsdp_state_dict_type_query = "What should be your FSDP's state dict type?"
+         fsdp_config["fsdp_state_dict_type"] = _ask_options(
+             fsdp_state_dict_type_query,
+             FSDP_STATE_DICT_TYPE,
+             lambda x: FSDP_STATE_DICT_TYPE[int(x)],
+             default=2,
+         )
+         fsdp_config["fsdp_forward_prefetch"] = _ask_field(
+             "Do you want to enable FSDP's forward prefetch policy? [yes/NO]: ",
+             _convert_yes_no_to_bool,
+             default=False,
+             error_message="Please enter yes or no.",
+         )
+         fsdp_config["fsdp_use_orig_params"] = _ask_field(
+             "Do you want to enable FSDP's `use_orig_params` feature? [YES/no]: ",
+             _convert_yes_no_to_bool,
+             default=True,
+             error_message="Please enter yes or no.",
+         )
+         fsdp_config["fsdp_cpu_ram_efficient_loading"] = _ask_field(
+             "Do you want to enable CPU RAM efficient model loading? Only applicable for 🤗 Transformers models. [YES/no]: ",
+             _convert_yes_no_to_bool,
+             default=True,
+             error_message="Please enter yes or no.",
+         )
+         if fsdp_config["fsdp_cpu_ram_efficient_loading"]:
+             fsdp_config["fsdp_sync_module_states"] = True
+         else:
+             fsdp_config["fsdp_sync_module_states"] = _ask_field(
+                 "Do you want each individually wrapped FSDP unit to broadcast module parameters from rank 0 at the start? [YES/no]: ",
+                 _convert_yes_no_to_bool,
+                 default=True,
+                 error_message="Please enter yes or no.",
+             )
+
+     megatron_lm_config = {}
+     if distributed_type in [DistributedType.MULTI_GPU]:
+         use_megatron_lm = _ask_field(
+             "Do you want to use Megatron-LM? [yes/NO]: ",
+             _convert_yes_no_to_bool,
+             default=False,
+             error_message="Please enter yes or no.",
+         )
+         if use_megatron_lm:
+             distributed_type = DistributedType.MEGATRON_LM
+     if distributed_type == DistributedType.MEGATRON_LM:
+         prefix = "megatron_lm_"
+         megatron_lm_config[prefix + "tp_degree"] = _ask_field(
+             "What is the Tensor Parallelism degree/size? [1]:",
+             int,
+             default=1,
+             error_message="Please enter an integer.",
+         )
+         if megatron_lm_config[prefix + "tp_degree"] > 1:
+             megatron_lm_config[prefix + "sequence_parallelism"] = _ask_field(
+                 "Do you want to enable Sequence Parallelism? [YES/no]: ",
+                 _convert_yes_no_to_bool,
+                 default=True,
+                 error_message="Please enter yes or no.",
+             )
+
+         megatron_lm_config[prefix + "pp_degree"] = _ask_field(
+             "What is the Pipeline Parallelism degree/size? [1]:",
+             int,
+             default=1,
+             error_message="Please enter an integer.",
+         )
+         if megatron_lm_config[prefix + "pp_degree"] > 1:
+             megatron_lm_config[prefix + "num_micro_batches"] = _ask_field(
+                 "What is the number of micro-batches? [1]:",
+                 int,
+                 default=1,
+                 error_message="Please enter an integer.",
+             )
+
+         megatron_lm_config[prefix + "recompute_activations"] = _ask_field(
+             "Do you want to enable selective activation recomputation? [YES/no]: ",
+             _convert_yes_no_to_bool,
+             default=True,
+             error_message="Please enter yes or no.",
+         )
+
+         megatron_lm_config[prefix + "use_distributed_optimizer"] = _ask_field(
+             "Do you want to use distributed optimizer "
+             "which shards optimizer state and gradients across data parallel ranks? [YES/no]: ",
+             _convert_yes_no_to_bool,
+             default=True,
+             error_message="Please enter yes or no.",
+         )
+
+         megatron_lm_config[prefix + "gradient_clipping"] = _ask_field(
+             "What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]: ",
+             float,
+             default=1.0,
+         )
+     # TPU specific defaults
+     tpu_commands = None
+     tpu_command_file = None
+     tpu_downcast_bf16 = "no"
+     tpu_env = []
+     tpu_name = None
+     tpu_vm = None
+     tpu_zone = None
+     tpu_use_sudo = False
+     tpu_use_cluster = False
+
+     if distributed_type in [
+         DistributedType.MULTI_CPU,
+         DistributedType.MULTI_XPU,
+         DistributedType.MULTI_GPU,
+         DistributedType.MULTI_MLU,
+         DistributedType.MULTI_NPU,
+         DistributedType.XLA,
+     ]:
+         machine_type = str(distributed_type).split(".")[1].replace("MULTI_", "")
+         if machine_type == "TPU":
+             machine_type += " cores"
+         elif machine_type == "CPU":
+             machine_type = "processes"
+         else:
+             machine_type += "(s)"
+         num_processes = _ask_field(
+             f"How many {machine_type} should be used for distributed training? [1]:",
+             int,
+             default=1,
+             error_message="Please enter an integer.",
+         )
+     elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:
+         num_processes = _ask_field(
+             "How many GPU(s) should be used for distributed training? [1]:",
+             int,
+             default=1,
+             error_message="Please enter an integer.",
+         )
+     else:
+         num_processes = 1
+
+     if (distributed_type == DistributedType.MULTI_GPU) and (num_machines == 1) and (num_processes == 1):
+         raise ValueError(
+             f"Specified distributed type {distributed_type} but only using 1 GPU on a single machine. Please select `No distributed training` for the type of machine you are using."
+         )
+
+     if (
+         distributed_type
+         in [
+             DistributedType.MULTI_GPU,
+             DistributedType.MULTI_MLU,
+             DistributedType.MULTI_NPU,
+             DistributedType.MULTI_XPU,
+             DistributedType.NO,
+         ]
+         and not use_cpu
+         and not use_mps
+     ):
+         if is_npu_available():
+             machine_type = "NPU(s)"
+         elif is_mlu_available():
+             machine_type = "MLU(s)"
+         else:
+             machine_type = "GPU(s)"
+         gpu_ids = _ask_field(
+             f"What {machine_type} (by id) should be used for training on this machine as a comma-separated list? [all]:",
+             default="all",
+         )
+
+     # CPU affinity is only supported on NVIDIA hardware for now
+     enable_cpu_affinity = False
+     if distributed_type in (DistributedType.NO, DistributedType.MULTI_GPU) and not use_cpu and not use_mps:
+         enable_cpu_affinity = _ask_field(
+             "Would you like to enable numa efficiency? (Currently only supported on NVIDIA hardware). [yes/NO]: ",
+             _convert_yes_no_to_bool,
+             default=False,
+             error_message="Please enter yes or no.",
+         )
+
+     if distributed_type == DistributedType.XLA:
+         mixed_precision = "no"
+         main_training_function = _ask_field(
+             "What is the name of the function in your script that should be launched in all parallel scripts? [main]: ",
+             default="main",
+         )
+         tpu_use_cluster = _ask_field(
+             "Are you using a TPU cluster? [yes/NO]: ",
+             _convert_yes_no_to_bool,
+             default=False,
+             error_message="Please enter yes or no.",
+         )
+         if tpu_use_cluster:
+             tpu_name = _ask_field(
+                 "What is the name of your TPU cluster? ",
+                 default=None,
+                 error_message="Please enter the name of your TPU cluster.",
+             )
+             tpu_zone = _ask_field(
+                 "What is the zone of your TPU cluster? ",
+                 default=None,
+                 error_message="Please enter the zone of your TPU cluster.",
+             )
+             tpu_use_sudo = _ask_field(
+                 "To run a python script in a TPU pod, should `sudo` be used? [yes/NO]: ",
+                 default=False,
+                 error_message="Please enter yes or no.",
+             )
+             run_commands = _ask_field(
+                 "Do you have code you wish to run on startup in each pod? [yes/NO]: ",
+                 _convert_yes_no_to_bool,
+                 default=False,
+                 error_message="Please enter yes or no.",
+             )
+             if run_commands:
+                 use_command_file = _ask_field(
+                     "Is this code located in a bash script? [yes/NO]: ",
+                     _convert_yes_no_to_bool,
+                     default=False,
+                     error_message="Please enter yes or no.",
+                 )
+                 if use_command_file:
+                     tpu_command_file = _ask_field(
+                         "What is the path to your bash script? ",
+                         default=None,
+                         error_message="Please enter the path to your bash script.",
+                     )
+                     tpu_command_file = os.path.abspath(tpu_command_file)
+                 else:
+                     print("Please enter each command separately you wish to run on startup in each pod.")
+                     tpu_commands = []
+                     another_command = True
+                     while another_command:
+                         tpu_commands.append(
+                             _ask_field(
+                                 "Please enter a single command to be run ",
+                                 default=None,
+                                 error_message="Please enter the commands you wish to run on startup in each pod as a single string.",
+                             )
+                         )
+                         another_command = _ask_field(
+                             "Do you wish to add another command? [yes/NO]: ",
+                             _convert_yes_no_to_bool,
+                             default=False,
+                             error_message="Please enter yes or no.",
+                         )
+             tpu_vm = _ask_field(
+                 "If not using an instance group, what are the names of the Compute VM instances to be used, separated by a comma: ",
+                 default="",
+             ).split(",")
+             tpu_env = _ask_field(
+                 "What environment variables do you wish to set in each pod, separated by a comma: ",
+                 default="",
+             ).split(",")
+
+     else:
+         main_training_function = "main"
+         if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config:
+             mixed_precision = None
+         else:
+             mixed_precision = _ask_options(
+                 "Do you wish to use FP16 or BF16 (mixed precision)?",
+                 ["no", "fp16", "bf16", "fp8"],
+                 _convert_mixed_precision,
+             )
+
+     if use_dynamo and mixed_precision == "no" and not use_cpu:
+         print(
+             "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
+         )
+
+     if distributed_type == DistributedType.XLA and mixed_precision == "bf16":
+         tpu_downcast_bf16 = _ask_field(
+             "Should `torch.float` be cast as `bfloat16` and `torch.double` remain `float32` on TPUs?", default="no"
+         )
+
+     return ClusterConfig(
+         compute_environment=ComputeEnvironment.LOCAL_MACHINE,
+         distributed_type=distributed_type,
+         num_processes=num_processes,
+         gpu_ids=gpu_ids,
+         mixed_precision=mixed_precision,
+         downcast_bf16=tpu_downcast_bf16,
+         machine_rank=machine_rank,
+         num_machines=num_machines,
+         main_process_ip=main_process_ip,
+         main_process_port=main_process_port,
+         main_training_function=main_training_function,
+         deepspeed_config=deepspeed_config,
+         fsdp_config=fsdp_config,
+         megatron_lm_config=megatron_lm_config,
+         ipex_config=ipex_config,
+         mpirun_config=mpirun_config,
+         use_cpu=use_cpu,
+         rdzv_backend=rdzv_backend,
+         same_network=same_network,
+         commands=tpu_commands,
+         command_file=tpu_command_file,
+         tpu_env=tpu_env,
+         tpu_name=tpu_name,
+         tpu_vm=tpu_vm,
+         tpu_zone=tpu_zone,
+         tpu_use_sudo=tpu_use_sudo,
+         tpu_use_cluster=tpu_use_cluster,
+         dynamo_config=dynamo_config,
+         debug=debug,
+         enable_cpu_affinity=enable_cpu_affinity,
+     )
env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/config.py ADDED
@@ -0,0 +1,89 @@
+ #!/usr/bin/env python
+
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import argparse
+ import os
+
+ from accelerate.utils import ComputeEnvironment
+
+ from .cluster import get_cluster_input
+ from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
+ from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
+ from .sagemaker import get_sagemaker_input
+
+
+ description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"
+
+
+ def get_user_input():
+     compute_environment = _ask_options(
+         "In which compute environment are you running?",
+         ["This machine", "AWS (Amazon SageMaker)"],
+         _convert_compute_environment,
+     )
+     if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
+         config = get_sagemaker_input()
+     else:
+         config = get_cluster_input()
+     return config
+
+
+ def config_command_parser(subparsers=None):
+     if subparsers is not None:
+         parser = subparsers.add_parser("config", description=description)
+     else:
+         parser = argparse.ArgumentParser("Accelerate config command", description=description)
+
+     parser.add_argument(
+         "--config_file",
+         default=None,
+         help=(
+             "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
+             "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
+             "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
+             "with 'huggingface'."
+         ),
+     )
+
+     if subparsers is not None:
+         parser.set_defaults(func=config_command)
+     return parser
+
+
+ def config_command(args):
+     config = get_user_input()
+     if args.config_file is not None:
+         config_file = args.config_file
+     else:
+         if not os.path.isdir(cache_dir):
+             os.makedirs(cache_dir)
+         config_file = default_yaml_config_file
+
+     if config_file.endswith(".json"):
+         config.to_json_file(config_file)
+     else:
+         config.to_yaml_file(config_file)
+     print(f"accelerate configuration saved at {config_file}")
+
+
+ def main():
+     parser = config_command_parser()
+     args = parser.parse_args()
+     config_command(args)
+
+
+ if __name__ == "__main__":
+     main()
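For reference, the file written by `config_command` can be read back with the loader imported at the top of this module; a short sketch, assuming a default config has already been saved:

    from accelerate.commands.config import load_config_from_file

    config = load_config_from_file(None)  # None falls back to the default config file location
    print(config.to_dict())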
env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/config_args.py ADDED
@@ -0,0 +1,243 @@
+ #!/usr/bin/env python
+
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import json
+ import os
+ from dataclasses import dataclass
+ from enum import Enum
+ from typing import List, Optional, Union
+
+ import yaml
+
+ from ...utils import ComputeEnvironment, DistributedType, SageMakerDistributedType
+ from ...utils.constants import SAGEMAKER_PYTHON_VERSION, SAGEMAKER_PYTORCH_VERSION, SAGEMAKER_TRANSFORMERS_VERSION
+
+
+ hf_cache_home = os.path.expanduser(
+     os.environ.get("HF_HOME", os.path.join(os.environ.get("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
+ )
+ cache_dir = os.path.join(hf_cache_home, "accelerate")
+ default_json_config_file = os.path.join(cache_dir, "default_config.yaml")
+ default_yaml_config_file = os.path.join(cache_dir, "default_config.yaml")
+
+ # For backward compatibility: the default config is the json one if it's the only existing file.
+ if os.path.isfile(default_yaml_config_file) or not os.path.isfile(default_json_config_file):
+     default_config_file = default_yaml_config_file
+ else:
+     default_config_file = default_json_config_file
+
+
+ def load_config_from_file(config_file):
+     if config_file is not None:
+         if not os.path.isfile(config_file):
+             raise FileNotFoundError(
+                 f"The passed configuration file `{config_file}` does not exist. "
+                 "Please pass an existing file to `accelerate launch`, or use the default one "
+                 "created through `accelerate config` and run `accelerate launch` "
+                 "without the `--config_file` argument."
+             )
+     else:
+         config_file = default_config_file
+     with open(config_file, encoding="utf-8") as f:
+         if config_file.endswith(".json"):
+             if (
+                 json.load(f).get("compute_environment", ComputeEnvironment.LOCAL_MACHINE)
+                 == ComputeEnvironment.LOCAL_MACHINE
+             ):
+                 config_class = ClusterConfig
+             else:
+                 config_class = SageMakerConfig
+             return config_class.from_json_file(json_file=config_file)
+         else:
+             if (
+                 yaml.safe_load(f).get("compute_environment", ComputeEnvironment.LOCAL_MACHINE)
+                 == ComputeEnvironment.LOCAL_MACHINE
+             ):
+                 config_class = ClusterConfig
+             else:
+                 config_class = SageMakerConfig
+             return config_class.from_yaml_file(yaml_file=config_file)
+
+
+ @dataclass
+ class BaseConfig:
+     compute_environment: ComputeEnvironment
+     distributed_type: Union[DistributedType, SageMakerDistributedType]
+     mixed_precision: str
+     use_cpu: bool
+     debug: bool
+
+     def to_dict(self):
+         result = self.__dict__
+         # For serialization, it's best to convert Enums to strings (or their underlying value type).
+         for key, value in result.items():
+             if isinstance(value, Enum):
+                 result[key] = value.value
+             if isinstance(value, dict) and not bool(value):
+                 result[key] = None
+         result = {k: v for k, v in result.items() if v is not None}
+         return result
+
+     @classmethod
+     def from_json_file(cls, json_file=None):
+         json_file = default_json_config_file if json_file is None else json_file
+         with open(json_file, encoding="utf-8") as f:
+             config_dict = json.load(f)
+         if "compute_environment" not in config_dict:
+             config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE
+         if "mixed_precision" not in config_dict:
+             config_dict["mixed_precision"] = "fp16" if ("fp16" in config_dict and config_dict["fp16"]) else None
+         if "fp16" in config_dict:  # Convert the config to the new format.
+             del config_dict["fp16"]
+         if "dynamo_backend" in config_dict:  # Convert the config to the new format.
+             dynamo_backend = config_dict.pop("dynamo_backend")
+             config_dict["dynamo_config"] = {} if dynamo_backend == "NO" else {"dynamo_backend": dynamo_backend}
+         if "use_cpu" not in config_dict:
+             config_dict["use_cpu"] = False
+         if "debug" not in config_dict:
+             config_dict["debug"] = False
+         if "enable_cpu_affinity" not in config_dict:
+             config_dict["enable_cpu_affinity"] = False
+         extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
+         if len(extra_keys) > 0:
+             raise ValueError(
+                 f"The config file at {json_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate`"
+                 " version or fix (and potentially remove) these keys from your config file."
+             )
+
+         return cls(**config_dict)
+
+     def to_json_file(self, json_file):
+         with open(json_file, "w", encoding="utf-8") as f:
+             content = json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
+             f.write(content)
+
+     @classmethod
+     def from_yaml_file(cls, yaml_file=None):
+         yaml_file = default_yaml_config_file if yaml_file is None else yaml_file
+         with open(yaml_file, encoding="utf-8") as f:
+             config_dict = yaml.safe_load(f)
+         if "compute_environment" not in config_dict:
+             config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE
+         if "mixed_precision" not in config_dict:
+             config_dict["mixed_precision"] = "fp16" if ("fp16" in config_dict and config_dict["fp16"]) else None
+         if isinstance(config_dict["mixed_precision"], bool) and not config_dict["mixed_precision"]:
+             config_dict["mixed_precision"] = "no"
+         if "fp16" in config_dict:  # Convert the config to the new format.
+             del config_dict["fp16"]
+         if "dynamo_backend" in config_dict:  # Convert the config to the new format.
+             dynamo_backend = config_dict.pop("dynamo_backend")
+             config_dict["dynamo_config"] = {} if dynamo_backend == "NO" else {"dynamo_backend": dynamo_backend}
+         if "use_cpu" not in config_dict:
+             config_dict["use_cpu"] = False
+         if "debug" not in config_dict:
+             config_dict["debug"] = False
+         if "enable_cpu_affinity" not in config_dict:
+             config_dict["enable_cpu_affinity"] = False
+         extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
+         if len(extra_keys) > 0:
+             raise ValueError(
+                 f"The config file at {yaml_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate`"
+                 " version or fix (and potentially remove) these keys from your config file."
+             )
+         return cls(**config_dict)
+
+     def to_yaml_file(self, yaml_file):
+         with open(yaml_file, "w", encoding="utf-8") as f:
+             yaml.safe_dump(self.to_dict(), f)
+
+     def __post_init__(self):
+         if isinstance(self.compute_environment, str):
+             self.compute_environment = ComputeEnvironment(self.compute_environment)
+         if isinstance(self.distributed_type, str):
+             if self.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
+                 self.distributed_type = SageMakerDistributedType(self.distributed_type)
+             else:
+                 self.distributed_type = DistributedType(self.distributed_type)
+         if getattr(self, "dynamo_config", None) is None:
+             self.dynamo_config = {}
+
+
+ @dataclass
+ class ClusterConfig(BaseConfig):
+     num_processes: int
+     machine_rank: int = 0
+     num_machines: int = 1
+     gpu_ids: Optional[str] = None
+     main_process_ip: Optional[str] = None
+     main_process_port: Optional[int] = None
+     rdzv_backend: Optional[str] = "static"
+     same_network: Optional[bool] = False
+     main_training_function: str = "main"
+     enable_cpu_affinity: bool = False
+
+     # args for deepspeed_plugin
+     deepspeed_config: dict = None
+     # args for fsdp
+     fsdp_config: dict = None
+     # args for megatron_lm
+     megatron_lm_config: dict = None
+     # args for ipex
+     ipex_config: dict = None
+     # args for mpirun
+     mpirun_config: dict = None
+     # args for TPU
+     downcast_bf16: bool = False
+
+     # args for TPU pods
+     tpu_name: str = None
+     tpu_zone: str = None
+     tpu_use_cluster: bool = False
+     tpu_use_sudo: bool = False
+     command_file: str = None
+     commands: List[str] = None
+     tpu_vm: List[str] = None
+     tpu_env: List[str] = None
+
+     # args for dynamo
+     dynamo_config: dict = None
+
+     def __post_init__(self):
+         if self.deepspeed_config is None:
+             self.deepspeed_config = {}
+         if self.fsdp_config is None:
+             self.fsdp_config = {}
+         if self.megatron_lm_config is None:
+             self.megatron_lm_config = {}
+         if self.ipex_config is None:
+             self.ipex_config = {}
+         if self.mpirun_config is None:
+             self.mpirun_config = {}
+         return super().__post_init__()
+
+
+ @dataclass
+ class SageMakerConfig(BaseConfig):
+     ec2_instance_type: str
+     iam_role_name: str
+     image_uri: Optional[str] = None
+     profile: Optional[str] = None
+     region: str = "us-east-1"
+     num_machines: int = 1
+     gpu_ids: str = "all"
+     base_job_name: str = f"accelerate-sagemaker-{num_machines}"
+     pytorch_version: str = SAGEMAKER_PYTORCH_VERSION
+     transformers_version: str = SAGEMAKER_TRANSFORMERS_VERSION
+     py_version: str = SAGEMAKER_PYTHON_VERSION
+     sagemaker_inputs_file: str = None
+     sagemaker_metrics_file: str = None
+     additional_args: dict = None
+     dynamo_config: dict = None
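A minimal sketch of using these dataclasses directly, writing a single-process CPU config to disk (the field values and file name are illustrative):

    from accelerate.commands.config.config_args import ClusterConfig

    config = ClusterConfig(
        compute_environment="LOCAL_MACHINE",  # __post_init__ converts strings to enums
        distributed_type="NO",
        mixed_precision="no",
        use_cpu=True,
        debug=False,
        num_processes=1,
    )
    config.to_yaml_file("my_config.yaml")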
env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/default.py ADDED
@@ -0,0 +1,133 @@
+ #!/usr/bin/env python
+
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from pathlib import Path
+
+ import torch
+
+ from ...utils import is_mlu_available, is_npu_available, is_xpu_available
+ from .config_args import ClusterConfig, default_json_config_file
+ from .config_utils import SubcommandHelpFormatter
+
+
+ description = "Create a default config file for Accelerate with only a few flags set."
+
+
+ def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
+     """
+     Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also
+     set CPU if it is a CPU-only machine.
+
+     Args:
+         mixed_precision (`str`, *optional*, defaults to "no"):
+             Mixed Precision to use. Should be one of "no", "fp16", or "bf16"
+         save_location (`str`, *optional*, defaults to `default_json_config_file`):
+             Optional custom save location. Should be passed to `--config_file` when using `accelerate launch`. Default
+             location is inside the huggingface cache folder (`~/.cache/huggingface`) but can be overridden by setting
+             the `HF_HOME` environment variable, followed by `accelerate/default_config.yaml`.
+         use_xpu (`bool`, *optional*, defaults to `False`):
+             Whether to use XPU if available.
+     """
+     path = Path(save_location)
+     path.parent.mkdir(parents=True, exist_ok=True)
+     if path.exists():
+         print(
+             f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
+         )
+         return False
+     mixed_precision = mixed_precision.lower()
+     if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
+         raise ValueError(
+             f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
+         )
+     config = {
+         "compute_environment": "LOCAL_MACHINE",
+         "mixed_precision": mixed_precision,
+     }
+     if is_mlu_available():
+         num_mlus = torch.mlu.device_count()
+         config["num_processes"] = num_mlus
+         config["use_cpu"] = False
+         if num_mlus > 1:
+             config["distributed_type"] = "MULTI_MLU"
+         else:
+             config["distributed_type"] = "NO"
+     elif torch.cuda.is_available():
+         num_gpus = torch.cuda.device_count()
+         config["num_processes"] = num_gpus
+         config["use_cpu"] = False
+         if num_gpus > 1:
+             config["distributed_type"] = "MULTI_GPU"
+         else:
+             config["distributed_type"] = "NO"
+     elif is_xpu_available() and use_xpu:
+         num_xpus = torch.xpu.device_count()
+         config["num_processes"] = num_xpus
+         config["use_cpu"] = False
+         if num_xpus > 1:
+             config["distributed_type"] = "MULTI_XPU"
+         else:
+             config["distributed_type"] = "NO"
+     elif is_npu_available():
+         num_npus = torch.npu.device_count()
+         config["num_processes"] = num_npus
+         config["use_cpu"] = False
+         if num_npus > 1:
+             config["distributed_type"] = "MULTI_NPU"
+         else:
+             config["distributed_type"] = "NO"
+     else:
+         num_xpus = 0
+         config["use_cpu"] = True
+         config["num_processes"] = 1
+         config["distributed_type"] = "NO"
+     config["debug"] = False
+     config = ClusterConfig(**config)
+     config.to_json_file(path)
+     return path
+
+
+ def default_command_parser(parser, parents):
+     parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
+     parser.add_argument(
+         "--config_file",
+         default=default_json_config_file,
+         help=(
+             "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
+             "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
+             "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
+             "with 'huggingface'."
+         ),
+         dest="save_location",
+     )
+
+     parser.add_argument(
+         "--mixed_precision",
+         choices=["no", "fp16", "bf16"],
+         type=str,
+         help="Whether or not to use mixed precision training. "
+         "Choose between FP16 and BF16 (bfloat16) training. "
+         "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
+         default="no",
+     )
+     parser.set_defaults(func=default_config_command)
+     return parser
+
+
+ def default_config_command(args):
+     config_file = write_basic_config(args.mixed_precision, args.save_location)
+     if config_file:
+         print(f"accelerate configuration saved at {config_file}")
env-llmeval/lib/python3.10/site-packages/accelerate/commands/env.py ADDED
@@ -0,0 +1,107 @@
+ #!/usr/bin/env python
+
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import argparse
+ import os
+ import platform
+ import subprocess
+
+ import numpy as np
+ import psutil
+ import torch
+
+ from accelerate import __version__ as version
+ from accelerate.commands.config import default_config_file, load_config_from_file
+
+ from ..utils import is_mlu_available, is_npu_available, is_xpu_available
+
+
+ def env_command_parser(subparsers=None):
+     if subparsers is not None:
+         parser = subparsers.add_parser("env")
+     else:
+         parser = argparse.ArgumentParser("Accelerate env command")
+
+     parser.add_argument(
+         "--config_file", default=None, help="The config file to use for the default values in the launching script."
+     )
+
+     if subparsers is not None:
+         parser.set_defaults(func=env_command)
+     return parser
+
+
+ def env_command(args):
+     pt_version = torch.__version__
+     pt_cuda_available = torch.cuda.is_available()
+     pt_xpu_available = is_xpu_available()
+     pt_mlu_available = is_mlu_available()
+     pt_npu_available = is_npu_available()
+
+     accelerate_config = "Not found"
+     # Get the default from the config file.
+     if args.config_file is not None or os.path.isfile(default_config_file):
+         accelerate_config = load_config_from_file(args.config_file).to_dict()
+
+     # if we can run which, get it
+     command = None
+     bash_location = "Not found"
+     if os.name == "nt":
+         command = ["where", "accelerate"]
+     elif os.name == "posix":
+         command = ["which", "accelerate"]
+     if command is not None:
+         bash_location = subprocess.check_output(command, text=True, stderr=subprocess.STDOUT).strip()
+     info = {
+         "`Accelerate` version": version,
+         "Platform": platform.platform(),
+         "`accelerate` bash location": bash_location,
+         "Python version": platform.python_version(),
+         "Numpy version": np.__version__,
+         "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
+         "PyTorch XPU available": str(pt_xpu_available),
+         "PyTorch NPU available": str(pt_npu_available),
+         "PyTorch MLU available": str(pt_mlu_available),
+         "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
+     }
+     if pt_cuda_available:
+         info["GPU type"] = torch.cuda.get_device_name()
+
+     print("\nCopy-and-paste the text below in your GitHub issue\n")
+     print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))
+
+     print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
+     accelerate_config_str = (
+         "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
+         if isinstance(accelerate_config, dict)
+         else f"\t{accelerate_config}"
+     )
+     print(accelerate_config_str)
+
+     info["`Accelerate` configs"] = accelerate_config
+
+     return info
+
+
+ def main() -> int:
+     parser = env_command_parser()
+     args = parser.parse_args()
+     env_command(args)
+     return 0
+
+
+ if __name__ == "__main__":
+     raise SystemExit(main())
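Since `env_command` only needs an object with a `config_file` attribute, the same report can be produced from Python; a minimal sketch:

    from accelerate.commands.env import env_command, env_command_parser

    args = env_command_parser().parse_args([])  # config_file defaults to None
    info = env_command(args)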
env-llmeval/lib/python3.10/site-packages/accelerate/commands/estimate.py ADDED
@@ -0,0 +1,309 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ from huggingface_hub import model_info
17
+ from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError
18
+
19
+ from accelerate import init_empty_weights
20
+ from accelerate.commands.utils import CustomArgumentParser
21
+ from accelerate.utils import (
22
+ calculate_maximum_sizes,
23
+ convert_bytes,
24
+ is_timm_available,
25
+ is_transformers_available,
26
+ )
27
+
28
+
29
+ if is_transformers_available():
30
+ import transformers
31
+ from transformers import AutoConfig, AutoModel
32
+
33
+ if is_timm_available():
34
+ import timm
35
+
36
+
37
+ def verify_on_hub(repo: str, token: str = None):
38
+ "Verifies that the model is on the hub and returns the model info."
39
+ try:
40
+ return model_info(repo, token=token)
41
+ except GatedRepoError:
42
+ return "gated"
43
+ except RepositoryNotFoundError:
44
+ return "repo"
45
+
46
+
47
+ def check_has_model(error):
48
+ """
49
+ Checks what library spawned `error` when a model is not found
50
+ """
51
+ if is_timm_available() and isinstance(error, RuntimeError) and "Unknown model" in error.args[0]:
52
+ return "timm"
53
+ elif (
54
+ is_transformers_available()
55
+ and isinstance(error, OSError)
56
+ and "does not appear to have a file named" in error.args[0]
57
+ ):
58
+ return "transformers"
59
+ else:
60
+ return "unknown"
61
+
62
+
+ def create_empty_model(model_name: str, library_name: str, trust_remote_code: bool = False, access_token: str = None):
+     """
+     Creates an empty model from its parent library on the `Hub` to calculate the overall memory consumption.
+
+     Args:
+         model_name (`str`):
+             The model name on the Hub
+         library_name (`str`):
+             The library the model has an integration with, such as `transformers`. Will be used if `model_name` has no
+             metadata on the Hub to determine the library.
+         trust_remote_code (`bool`, `optional`, defaults to `False`):
+             Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
+             should only be set to `True` for repositories you trust and in which you have read the code, as it will
+             execute code present on the Hub on your local machine.
+         access_token (`str`, `optional`, defaults to `None`):
+             The access token to use to access private or gated models on the Hub. (for use on the Gradio app)
+
+     Returns:
+         `torch.nn.Module`: The torch model that has been initialized on the `meta` device.
+
+     """
+     model_info = verify_on_hub(model_name, access_token)
+     # Simplified errors
+     if model_info == "gated":
+         raise GatedRepoError(
+             f"Repo for model `{model_name}` is gated. You must be authenticated to access it. Please run `huggingface-cli login`."
+         )
+     elif model_info == "repo":
+         raise RepositoryNotFoundError(
+             f"Repo for model `{model_name}` does not exist on the Hub. If you are trying to access a private repo,"
+             " make sure you are authenticated via `huggingface-cli login` and have access."
+         )
+     if library_name is None:
+         library_name = getattr(model_info, "library_name", False)
+         if not library_name:
+             raise ValueError(
+                 f"Model `{model_name}` does not have any library metadata on the Hub, please manually pass in a `--library_name` to use (such as `transformers`)"
+             )
+     if library_name == "transformers":
+         if not is_transformers_available():
+             raise ImportError(
+                 f"To check `{model_name}`, `transformers` must be installed. Please install it via `pip install transformers`"
+             )
+         print(f"Loading pretrained config for `{model_name}` from `transformers`...")
+         if model_info.config is None:
+             raise RuntimeError(f"Tried to load `{model_name}` with `transformers` but it does not have any metadata.")
+
+         auto_map = model_info.config.get("auto_map", False)
+         config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code, token=access_token)
+         with init_empty_weights():
+             # remote code could specify a specific `AutoModel` class in the `auto_map`
+             constructor = AutoModel
+             if isinstance(auto_map, dict):
+                 value = None
+                 for key in auto_map.keys():
+                     if key.startswith("AutoModelFor"):
+                         value = key
+                         break
+                 if value is not None:
+                     constructor = getattr(transformers, value)
+             model = constructor.from_config(config, trust_remote_code=trust_remote_code)
+     elif library_name == "timm":
+         if not is_timm_available():
+             raise ImportError(
+                 f"To check `{model_name}`, `timm` must be installed. Please install it via `pip install timm`"
+             )
+         print(f"Loading pretrained config for `{model_name}` from `timm`...")
+         with init_empty_weights():
+             model = timm.create_model(model_name, pretrained=False)
+     else:
+         raise ValueError(
+             f"Library `{library_name}` is not supported yet, please open an issue on GitHub for us to add support."
+         )
+     return model
+
+
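Note for reviewers: `create_empty_model` is importable, so the estimator can also be driven from Python rather than the CLI. A minimal sketch, assuming `bert-base-cased` as an illustrative model id (only the config is fetched; weights land on the `meta` device and are never allocated):

    from accelerate.commands.estimate import create_empty_model
    from accelerate.utils import calculate_maximum_sizes, convert_bytes

    # Build the model skeleton on the `meta` device, then measure it.
    model = create_empty_model("bert-base-cased", library_name="transformers")
    total_size, largest_layer = calculate_maximum_sizes(model)
    print(convert_bytes(total_size), convert_bytes(largest_layer[0]))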
+ def create_ascii_table(headers: list, rows: list, title: str):
+     "Creates a pretty table from a list of rows, minimal version of `tabulate`."
+     sep_char, in_between = "│", "─"
+     column_widths = []
+     for i in range(len(headers)):
+         column_values = [row[i] for row in rows] + [headers[i]]
+         max_column_width = max(len(value) for value in column_values)
+         column_widths.append(max_column_width)
+
+     formats = [f"%{column_widths[i]}s" for i in range(len(rows[0]))]
+
+     pattern = f"{sep_char}{sep_char.join(formats)}{sep_char}"
+     diff = 0
+
+     def make_row(left_char, middle_char, right_char):
+         return f"{left_char}{middle_char.join([in_between * n for n in column_widths])}{in_between * diff}{right_char}"
+
+     separator = make_row("├", "┼", "┤")
+     if len(title) > sum(column_widths):
+         diff = abs(len(title) - len(separator))
+         column_widths[-1] += diff
+
+     # Update with diff
+     separator = make_row("├", "┼", "┤")
+     initial_rows = [
+         make_row("┌", in_between, "┐"),
+         f"{sep_char}{title.center(len(separator) - 2)}{sep_char}",
+         make_row("├", "┬", "┤"),
+     ]
+     table = "\n".join(initial_rows) + "\n"
+     column_widths[-1] += diff
+     centered_line = [text.center(column_widths[i]) for i, text in enumerate(headers)]
+     table += f"{pattern % tuple(centered_line)}\n{separator}\n"
+     for i, line in enumerate(rows):
+         centered_line = [t.center(column_widths[i]) for i, t in enumerate(line)]
+         table += f"{pattern % tuple(centered_line)}\n"
+     table += f'└{"┴".join([in_between * n for n in column_widths])}┘'
+
+     return table
+
+
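A quick sketch of how this helper renders (values illustrative; every cell must already be a string, since column widths are computed with `len`):

    print(create_ascii_table(
        headers=["dtype", "Total Size"],
        rows=[["float32", "413.18 MB"], ["float16", "206.59 MB"]],
        title="Example",
    ))
    # Prints a box-drawn table: a centered title row, a header row, then one row per entry.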
+ def estimate_command_parser(subparsers=None):
+     if subparsers is not None:
+         parser = subparsers.add_parser("estimate-memory")
+     else:
+         parser = CustomArgumentParser(description="Model size estimator for fitting a model onto CUDA memory.")
+
+     parser.add_argument("model_name", type=str, help="The model name on the Hugging Face Hub.")
+     parser.add_argument(
+         "--library_name",
+         type=str,
+         help="The library the model has an integration with, such as `transformers`, needed only if this information is not stored on the Hub.",
+         choices=["timm", "transformers"],
+     )
+     parser.add_argument(
+         "--dtypes",
+         type=str,
+         nargs="+",
+         default=["float32", "float16", "int8", "int4"],
+         help="The dtypes to use for the model, must be one (or many) of `float32`, `float16`, `int8`, and `int4`",
+         choices=["float32", "float16", "int8", "int4"],
+     )
+     parser.add_argument(
+         "--trust_remote_code",
+         action="store_true",
+         help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. This flag
+         should only be used for repositories you trust and in which you have read the code, as it will execute
+         code present on the Hub on your local machine.""",
+         default=False,
+     )
+
+     if subparsers is not None:
+         parser.set_defaults(func=estimate_command)
+     return parser
+
+
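Once registered under the `accelerate` CLI, the parser above corresponds to invocations like `accelerate estimate-memory bert-base-cased --library_name transformers --dtypes float32 float16` (model id illustrative).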
+ def estimate_training_usage(bytes: int, mixed_precision: str, msamp_config: str = None) -> dict:
+     """
+     Given an amount of `bytes` and `mixed_precision`, calculates how much training memory is needed for a batch size of
+     1.
+
+     Args:
+         bytes (`int`):
+             The size of the model being trained.
+         mixed_precision (`str`):
+             The mixed precision that would be run.
+         msamp_config (`str`):
+             The msamp config to estimate the training memory for if `mixed_precision` is set to `"fp8"`.
+     """
+     memory_sizes = {"model": -1, "optimizer": -1, "gradients": -1, "step": -1}
+     fp32_size = bytes
+     fp16_size = bytes // 2
+
+     if mixed_precision == "float32":
+         memory_sizes["model"] = fp32_size
+         memory_sizes["gradients"] = fp32_size
+         memory_sizes["optimizer"] = fp32_size * 2
+         memory_sizes["step"] = fp32_size * 4
+     elif mixed_precision in ("float16", "bfloat16") or (mixed_precision == "fp8" and msamp_config is None):
+         # With native `TransformersEngine`, there is no memory savings with FP8
+         # With mixed precision training, the model has weights stored
+         # in FP16 and FP32
+         memory_sizes["model"] = fp32_size
+         # 1.5 from weight gradient + computation (GEMM)
+         memory_sizes["gradients"] = fp32_size + fp16_size
+         # 2x from optimizer states
+         memory_sizes["optimizer"] = fp32_size * 2  # Optimizer states
+         memory_sizes["step"] = memory_sizes["optimizer"]
+     return memory_sizes
+
+
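To make the multipliers concrete: for a 4 GiB float32 model, the first branch above yields model 4 GiB, gradients 4 GiB, optimizer 8 GiB, and step 16 GiB. `estimate_command` later reports the maximum of these (here 16 GiB, i.e. 4x the model size) in the "Training using Adam" column. A sketch:

    sizes = estimate_training_usage(4 * 1024**3, "float32")
    # -> raw byte counts equal to 4 GiB (model), 4 GiB (gradients), 8 GiB (optimizer), 16 GiB (step)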
+ def gather_data(args):
+     "Creates an empty model and gathers the data for the sizes"
+     try:
+         model = create_empty_model(
+             args.model_name, library_name=args.library_name, trust_remote_code=args.trust_remote_code
+         )
+     except (RuntimeError, OSError) as e:
+         library = check_has_model(e)
+         if library != "unknown":
+             raise RuntimeError(
+                 f"Tried to load `{args.model_name}` with `{library}` but a possible model to load was not found inside the repo."
+             )
+         raise e
+
+     total_size, largest_layer = calculate_maximum_sizes(model)
+
+     data = []
+
+     for dtype in args.dtypes:
+         dtype_total_size = total_size
+         dtype_largest_layer = largest_layer[0]
+         dtype_training_size = estimate_training_usage(dtype_total_size, dtype)
+         if dtype == "float16":
+             dtype_total_size /= 2
+             dtype_largest_layer /= 2
+         elif dtype == "int8":
+             dtype_total_size /= 4
+             dtype_largest_layer /= 4
+         elif dtype == "int4":
+             dtype_total_size /= 8
+             dtype_largest_layer /= 8
+         data.append([dtype, dtype_largest_layer, dtype_total_size, dtype_training_size])
+     return data
+
+
+ def estimate_command(args):
+     data = gather_data(args)
+     for row in data:
+         for i, item in enumerate(row):
+             if isinstance(item, (int, float)):
+                 row[i] = convert_bytes(item)
+             elif isinstance(item, dict):
+                 training_usage = max(item.values())
+                 row[i] = convert_bytes(training_usage) if training_usage != -1 else "N/A"
+
+     headers = ["dtype", "Largest Layer", "Total Size", "Training using Adam"]
+
+     title = f"Memory Usage for loading `{args.model_name}`"
+     table = create_ascii_table(headers, data, title)
+     print(table)
+
+
+ def main():
+     parser = estimate_command_parser()
+     args = parser.parse_args()
+     estimate_command(args)
+
+
+ if __name__ == "__main__":
+     main()
env-llmeval/lib/python3.10/site-packages/accelerate/commands/launch.py ADDED
@@ -0,0 +1,1085 @@
+ #!/usr/bin/env python
+
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import argparse
+ import importlib
+ import logging
+ import os
+ import subprocess
+ import sys
+ from pathlib import Path
+
+ import psutil
+ import torch
+
+ from accelerate.commands.config import default_config_file, load_config_from_file
+ from accelerate.commands.config.config_args import SageMakerConfig
+ from accelerate.commands.config.config_utils import DYNAMO_BACKENDS
+ from accelerate.commands.utils import CustomArgumentParser
+ from accelerate.state import get_int_from_env
+ from accelerate.utils import (
+     ComputeEnvironment,
+     DistributedType,
+     PrepareForLaunch,
+     _filter_args,
+     check_cuda_p2p_ib_support,
+     convert_dict_to_env_variables,
+     is_bf16_available,
+     is_deepspeed_available,
+     is_mlu_available,
+     is_npu_available,
+     is_rich_available,
+     is_sagemaker_available,
+     is_torch_version,
+     is_torch_xla_available,
+     is_xpu_available,
+     patch_environment,
+     prepare_deepspeed_cmd_env,
+     prepare_multi_gpu_env,
+     prepare_sagemager_args_inputs,
+     prepare_simple_launcher_cmd_env,
+     prepare_tpu,
+ )
+ from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS, TORCH_DYNAMO_MODES
+
+
+ if is_rich_available():
+     from rich import get_console
+     from rich.logging import RichHandler
+
+     FORMAT = "%(message)s"
+     logging.basicConfig(format=FORMAT, datefmt="[%X]", handlers=[RichHandler()])
+
+
+ logger = logging.getLogger(__name__)
+
+
+ options_to_group = {
+     "multi_gpu": "Distributed GPUs",
+     "tpu": "TPU",
+     "use_deepspeed": "DeepSpeed Arguments",
+     "use_fsdp": "FSDP Arguments",
+     "use_megatron_lm": "Megatron-LM Arguments",
+ }
+
+
+ def clean_option(option):
+     "Finds all cases of - after the first two characters and changes them to _"
+     if option.startswith("--"):
+         return option[2:].replace("-", "_")
+
+
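A quick illustration of `clean_option` (note that tokens not starting with `--` fall through and return `None`, which simply never matches a key of `options_to_group` in the formatter below):

    clean_option("--num-processes")  # -> "num_processes"
    clean_option("train.py")         # -> None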
+ class CustomHelpFormatter(argparse.HelpFormatter):
+     """
+     This is a custom help formatter that will hide all arguments that are not used in the command line when the help is
+     called. This is useful for the case where the user is using a specific platform and only wants to see the arguments
+     for that platform.
+     """
+
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         self.titles = [
+             "Hardware Selection Arguments",
+             "Resource Selection Arguments",
+             "Training Paradigm Arguments",
+             "positional arguments",
+             "optional arguments",
+         ]
+
+     def add_argument(self, action: argparse.Action):
+         if "accelerate" in sys.argv[0] and "launch" in sys.argv[1:]:
+             args = sys.argv[2:]
+         else:
+             args = sys.argv[1:]
+
+         if len(args) > 1:
+             args = list(map(clean_option, args))
+             used_platforms = [arg for arg in args if arg in options_to_group.keys()]
+             used_titles = [options_to_group[o] for o in used_platforms]
+             if action.container.title not in self.titles + used_titles:
+                 action.help = argparse.SUPPRESS
+             elif action.container.title == "Hardware Selection Arguments":
+                 if set(action.option_strings).isdisjoint(set(args)):
+                     action.help = argparse.SUPPRESS
+                 else:
+                     action.help = action.help + " (currently selected)"
+             elif action.container.title == "Training Paradigm Arguments":
+                 if set(action.option_strings).isdisjoint(set(args)):
+                     action.help = argparse.SUPPRESS
+                 else:
+                     action.help = action.help + " (currently selected)"
+
+         action.option_strings = [s for s in action.option_strings if "-" not in s[2:]]
+         super().add_argument(action)
+
+     def end_section(self):
+         if len(self._current_section.items) < 2:
+             self._current_section.items = []
+             self._current_section.heading = ""
+         super().end_section()
+
+
+ def launch_command_parser(subparsers=None):
+     description = "Launch a python script in a distributed scenario. Arguments can be passed in with either hyphens (`--num-processes=2`) or underscores (`--num_processes=2`)"
+     if subparsers is not None:
+         parser = subparsers.add_parser(
+             "launch", description=description, add_help=False, allow_abbrev=False, formatter_class=CustomHelpFormatter
+         )
+     else:
+         parser = CustomArgumentParser(
+             "Accelerate launch command",
+             description=description,
+             add_help=False,
+             allow_abbrev=False,
+             formatter_class=CustomHelpFormatter,
+         )
+
+     parser.add_argument("-h", "--help", action="help", help="Show this help message and exit.")
+
+     parser.add_argument(
+         "--config_file",
+         default=None,
+         help="The config file to use for the default values in the launching script.",
+     )
+     parser.add_argument(
+         "--quiet",
+         "-q",
+         action="store_true",
+         help="Silence subprocess errors from the launch stack trace and only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations)",
+     )
+     # Hardware selection arguments
+     hardware_args = parser.add_argument_group(
+         "Hardware Selection Arguments", "Arguments for selecting the hardware to be used."
+     )
+     hardware_args.add_argument(
+         "--cpu", default=False, action="store_true", help="Whether or not to force the training on the CPU."
+     )
+     hardware_args.add_argument(
+         "--multi_gpu",
+         default=False,
+         action="store_true",
+         help="Whether or not this should launch a distributed GPU training.",
+     )
+     hardware_args.add_argument(
+         "--tpu", default=False, action="store_true", help="Whether or not this should launch a TPU training."
+     )
+     hardware_args.add_argument(
+         "--ipex",
+         default=False,
+         action="store_true",
+         help="Whether or not this should launch an Intel PyTorch Extension (IPEX) training.",
+     )
+
+     # Resource selection arguments
+     resource_args = parser.add_argument_group(
+         "Resource Selection Arguments", "Arguments for fine-tuning how available hardware should be used."
+     )
+     resource_args.add_argument(
+         "--mixed_precision",
+         type=str,
+         choices=["no", "fp16", "bf16", "fp8"],
+         help="Whether or not to use mixed precision training. "
+         "Choose between FP16 and BF16 (bfloat16) training. "
+         "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
+     )
+     resource_args.add_argument(
+         "--num_processes", type=int, default=None, help="The total number of processes to be launched in parallel."
+     )
+     resource_args.add_argument(
+         "--num_machines", type=int, default=None, help="The total number of machines used in this training."
+     )
+     resource_args.add_argument(
+         "--num_cpu_threads_per_process",
+         type=int,
+         default=None,
+         help="The number of CPU threads per process. Can be tuned for optimal performance.",
+     )
+     resource_args.add_argument(
+         "--enable_cpu_affinity",
+         default=False,
+         action="store_true",
+         help="Whether or not CPU affinity and balancing should be enabled. Currently only supported on NVIDIA hardware.",
+     )
+
+     # Dynamo arguments
+     resource_args.add_argument(
+         "--dynamo_backend",
+         type=str,
+         choices=["no"] + [b.lower() for b in DYNAMO_BACKENDS],
+         help="Choose a backend to optimize your training with dynamo, see more at "
+         "https://github.com/pytorch/torchdynamo.",
+     )
+     resource_args.add_argument(
+         "--dynamo_mode",
+         type=str,
+         default="default",
+         choices=TORCH_DYNAMO_MODES,
+         help="Choose a mode to optimize your training with dynamo.",
+     )
+     resource_args.add_argument(
+         "--dynamo_use_fullgraph",
+         default=False,
+         action="store_true",
+         help="Whether to use full graph mode for dynamo, or whether it is OK to break the model into several subgraphs.",
+     )
+     resource_args.add_argument(
+         "--dynamo_use_dynamic",
+         default=False,
+         action="store_true",
+         help="Whether to enable dynamic shape tracing.",
+     )
+
+     # Training Paradigm arguments
+     paradigm_args = parser.add_argument_group(
+         "Training Paradigm Arguments", "Arguments for selecting which training paradigm to use."
+     )
+     paradigm_args.add_argument(
+         "--use_deepspeed",
+         default=False,
+         action="store_true",
+         help="Whether to use deepspeed.",
+     )
+     paradigm_args.add_argument(
+         "--use_fsdp",
+         default=False,
+         action="store_true",
+         help="Whether to use fsdp.",
+     )
+     paradigm_args.add_argument(
+         "--use_megatron_lm",
+         default=False,
+         action="store_true",
+         help="Whether to use Megatron-LM.",
+     )
+     paradigm_args.add_argument(
+         "--use_xpu",
+         default=False,
+         action="store_true",
+         help="Whether to use IPEX plugin to speed up training on XPU specifically.",
+     )
+
+     # distributed GPU training arguments
+     distributed_args = parser.add_argument_group("Distributed GPUs", "Arguments related to distributed GPU training.")
+     distributed_args.add_argument(
+         "--gpu_ids",
+         default=None,
+         help="What GPUs (by id) should be used for training on this machine as a comma-separated list",
+     )
+     distributed_args.add_argument(
+         "--same_network",
+         default=False,
+         action="store_true",
+         help="Whether all machines used for multinode training exist on the same local network.",
+     )
+     distributed_args.add_argument(
+         "--machine_rank", type=int, default=None, help="The rank of the machine on which this script is launched."
+     )
+     distributed_args.add_argument(
+         "--main_process_ip", type=str, default=None, help="The IP address of the machine of rank 0."
+     )
+     distributed_args.add_argument(
+         "--main_process_port",
+         type=int,
+         default=None,
+         help="The port to use to communicate with the machine of rank 0.",
+     )
+     distributed_args.add_argument(
+         "-t",
+         "--tee",
+         default="0",
+         type=str,
+         help="Tee std streams into a log file and also to console.",
+     )
+     distributed_args.add_argument(
+         "--role",
+         type=str,
+         default="default",
+         help="User-defined role for the workers.",
+     )
+     # Rendezvous related arguments
+     distributed_args.add_argument(
+         "--rdzv_backend",
+         type=str,
+         default="static",
+         help="The rendezvous method to use, such as 'static' (the default) or 'c10d'",
+     )
+     distributed_args.add_argument(
+         "--rdzv_conf",
+         type=str,
+         default="",
+         help="Additional rendezvous configuration (<key1>=<value1>,<key2>=<value2>,...).",
+     )
+     distributed_args.add_argument(
+         "--max_restarts",
+         type=int,
+         default=0,
+         help="Maximum number of worker group restarts before failing.",
+     )
+     distributed_args.add_argument(
+         "--monitor_interval",
+         type=float,
+         default=5,
+         help="Interval, in seconds, to monitor the state of workers.",
+     )
+     parser.add_argument(
+         "-m",
+         "--module",
+         action="store_true",
+         help="Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.",
+     )
+     parser.add_argument(
+         "--no_python",
+         action="store_true",
+         help="Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.",
+     )
+
+     # TPU arguments
+     tpu_args = parser.add_argument_group("TPU", "Arguments related to TPU.")
+     tpu_args.add_argument(
+         "--tpu_cluster",
+         action="store_true",
+         dest="tpu_use_cluster",
+         help="Whether to use a GCP TPU pod for training.",
+     )
+     tpu_args.add_argument(
+         "--no_tpu_cluster",
+         action="store_false",
+         dest="tpu_use_cluster",
+         help="Should not be passed explicitly, this is for internal use only.",
+     )
+     tpu_args.add_argument(
+         "--tpu_use_sudo",
+         action="store_true",
+         help="Whether to use `sudo` when running the TPU training script in each pod.",
+     )
+     tpu_args.add_argument(
+         "--vm",
+         type=str,
+         action="append",
+         help=(
+             "List of single Compute VM instance names. "
+             "If not provided we assume usage of instance groups. For TPU pods."
+         ),
+     )
+     tpu_args.add_argument(
+         "--env",
+         type=str,
+         action="append",
+         help="List of environment variables to set on the Compute VM instances. For TPU pods.",
+     )
+     tpu_args.add_argument(
+         "--main_training_function",
+         type=str,
+         default=None,
+         help="The name of the main function to be executed in your script (only for TPU training).",
+     )
+     tpu_args.add_argument(
+         "--downcast_bf16",
+         action="store_true",
+         help="Whether, when using bf16 precision on TPUs, both float and double tensors are cast to bfloat16, or double tensors remain as float32.",
+     )
+
+     # DeepSpeed arguments
+     deepspeed_args = parser.add_argument_group("DeepSpeed Arguments", "Arguments related to DeepSpeed.")
+     deepspeed_args.add_argument(
+         "--deepspeed_config_file",
+         default=None,
+         type=str,
+         help="DeepSpeed config file.",
+     )
+     deepspeed_args.add_argument(
+         "--zero_stage",
+         default=None,
+         type=int,
+         help="DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). "
+         "If unspecified, will default to `2`.",
+     )
+     deepspeed_args.add_argument(
+         "--offload_optimizer_device",
+         default=None,
+         type=str,
+         help="Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). "
+         "If unspecified, will default to 'none'.",
+     )
+     deepspeed_args.add_argument(
+         "--offload_param_device",
+         default=None,
+         type=str,
+         help="Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed). "
+         "If unspecified, will default to 'none'.",
+     )
+     deepspeed_args.add_argument(
+         "--offload_optimizer_nvme_path",
+         default=None,
+         type=str,
+         help="Decides the NVMe path to offload optimizer states to (useful only when `use_deepspeed` flag is passed). "
+         "If unspecified, will default to 'none'.",
+     )
+     deepspeed_args.add_argument(
+         "--offload_param_nvme_path",
+         default=None,
+         type=str,
+         help="Decides the NVMe path to offload parameters to (useful only when `use_deepspeed` flag is passed). "
+         "If unspecified, will default to 'none'.",
+     )
+     deepspeed_args.add_argument(
+         "--gradient_accumulation_steps",
+         default=None,
+         type=int,
+         help="Number of gradient accumulation steps used in your training script (useful only when `use_deepspeed` flag is passed). "
+         "If unspecified, will default to `1`.",
+     )
+     deepspeed_args.add_argument(
+         "--gradient_clipping",
+         default=None,
+         type=float,
+         help="Gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed). "
+         "If unspecified, will default to `1.0`.",
+     )
+     deepspeed_args.add_argument(
+         "--zero3_init_flag",
+         default=None,
+         type=str,
+         help="Decides whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. "
+         "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `true`.",
+     )
+     deepspeed_args.add_argument(
+         "--zero3_save_16bit_model",
+         default=None,
+         type=str,
+         help="Decides whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. "
+         "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `false`.",
+     )
+     deepspeed_args.add_argument(
+         "--deepspeed_hostfile",
+         default=None,
+         type=str,
+         help="DeepSpeed hostfile for configuring multi-node compute resources.",
+     )
+     deepspeed_args.add_argument(
+         "--deepspeed_exclusion_filter",
+         default=None,
+         type=str,
+         help="DeepSpeed exclusion filter string when using a multi-node setup.",
+     )
+     deepspeed_args.add_argument(
+         "--deepspeed_inclusion_filter",
+         default=None,
+         type=str,
+         help="DeepSpeed inclusion filter string when using a multi-node setup.",
+     )
+     deepspeed_args.add_argument(
+         "--deepspeed_multinode_launcher",
+         default=None,
+         type=str,
+         help="DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.",
+     )
+
+     # fsdp arguments
+     fsdp_args = parser.add_argument_group("FSDP Arguments", "Arguments related to Fully Sharded Data Parallelism.")
+     fsdp_args.add_argument(
+         "--fsdp_offload_params",
+         default="false",
+         type=str,
+         help="Decides whether (true|false) to offload parameters and gradients to CPU. (useful only when `use_fsdp` flag is passed).",
+     )
+     fsdp_args.add_argument(
+         "--fsdp_min_num_params",
+         type=int,
+         default=1e8,
+         help="FSDP's minimum number of parameters for Default Auto Wrapping. (useful only when `use_fsdp` flag is passed).",
+     )
+     fsdp_args.add_argument(
+         "--fsdp_sharding_strategy",
+         type=str,
+         default="FULL_SHARD",
+         help="FSDP's Sharding Strategy. (useful only when `use_fsdp` flag is passed).",
+     )
+     fsdp_args.add_argument(
+         "--fsdp_auto_wrap_policy",
+         type=str,
+         default=None,
+         help="FSDP's auto wrap policy. (useful only when `use_fsdp` flag is passed).",
+     )
+     fsdp_args.add_argument(
+         "--fsdp_transformer_layer_cls_to_wrap",
+         default=None,
+         type=str,
+         help="Transformer layer class name (case-sensitive) to wrap, e.g., `BertLayer`, `GPTJBlock`, `T5Block` .... "
+         "(useful only when `use_fsdp` flag is passed).",
+     )
+     fsdp_args.add_argument(
+         "--fsdp_backward_prefetch_policy",
+         default=None,
+         type=str,
+         help="This argument is deprecated and will be removed in version 0.27.0 of 🤗 Accelerate. Use `fsdp_backward_prefetch` instead.",
+     )
+     fsdp_args.add_argument(
+         "--fsdp_backward_prefetch",
+         default=None,
+         type=str,
+         help="FSDP's backward prefetch policy. (useful only when `use_fsdp` flag is passed).",
+     )
+     fsdp_args.add_argument(
+         "--fsdp_state_dict_type",
+         default=None,
+         type=str,
+         help="FSDP's state dict type. (useful only when `use_fsdp` flag is passed).",
+     )
+     fsdp_args.add_argument(
+         "--fsdp_forward_prefetch",
+         default="false",
+         type=str,
+         help="If True, then FSDP explicitly prefetches the next upcoming "
+         "all-gather while executing in the forward pass (useful only when `use_fsdp` flag is passed).",
+     )
+     fsdp_args.add_argument(
+         "--fsdp_use_orig_params",
+         default="true",
+         type=str,
+         help="If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters."
+         " (useful only when `use_fsdp` flag is passed).",
+     )
+     fsdp_args.add_argument(
+         "--fsdp_cpu_ram_efficient_loading",
+         default="true",
+         type=str,
+         help="If True, only the first process loads the pretrained model checkpoint while all other processes have empty weights. "
+         "Only applicable for 🤗 Transformers. When using this, `--fsdp_sync_module_states` needs to be True. "
+         "(useful only when `use_fsdp` flag is passed).",
+     )
+     fsdp_args.add_argument(
+         "--fsdp_sync_module_states",
+         default="true",
+         type=str,
+         help="If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0."
+         " (useful only when `use_fsdp` flag is passed).",
+     )
+
+     # megatron_lm args
+     megatron_lm_args = parser.add_argument_group("Megatron-LM Arguments", "Arguments related to Megatron-LM.")
+     megatron_lm_args.add_argument(
+         "--megatron_lm_tp_degree",
+         type=int,
+         default=1,
+         help="Megatron-LM's Tensor Parallelism (TP) degree. (useful only when `use_megatron_lm` flag is passed).",
+     )
+     megatron_lm_args.add_argument(
+         "--megatron_lm_pp_degree",
+         type=int,
+         default=1,
+         help="Megatron-LM's Pipeline Parallelism (PP) degree. (useful only when `use_megatron_lm` flag is passed).",
+     )
+     megatron_lm_args.add_argument(
+         "--megatron_lm_num_micro_batches",
+         type=int,
+         default=None,
+         help="Megatron-LM's number of micro batches when PP degree > 1. (useful only when `use_megatron_lm` flag is passed).",
+     )
+     megatron_lm_args.add_argument(
+         "--megatron_lm_sequence_parallelism",
+         default=None,
+         type=str,
+         help="Decides whether (true|false) to enable Sequence Parallelism when TP degree > 1. "
+         "(useful only when `use_megatron_lm` flag is passed).",
+     )
+     megatron_lm_args.add_argument(
+         "--megatron_lm_recompute_activations",
+         default=None,
+         type=str,
+         help="Decides whether (true|false) to enable Selective Activation Recomputation. "
+         "(useful only when `use_megatron_lm` flag is passed).",
+     )
+     megatron_lm_args.add_argument(
+         "--megatron_lm_use_distributed_optimizer",
+         default=None,
+         type=str,
+         help="Decides whether (true|false) to use the distributed optimizer, "
+         "which shards optimizer state and gradients across Data Parallel (DP) ranks. "
+         "(useful only when `use_megatron_lm` flag is passed).",
+     )
+     megatron_lm_args.add_argument(
+         "--megatron_lm_gradient_clipping",
+         default=1.0,
+         type=float,
+         help="Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable). "
+         "(useful only when `use_megatron_lm` flag is passed).",
+     )
+
+     # AWS arguments
+     aws_args = parser.add_argument_group("AWS Arguments", "Arguments related to AWS.")
+     aws_args.add_argument(
+         "--aws_access_key_id",
+         type=str,
+         default=None,
+         help="The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job.",
+     )
+     aws_args.add_argument(
+         "--aws_secret_access_key",
+         type=str,
+         default=None,
+         help="The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job.",
+     )
+     parser.add_argument(
+         "--debug",
+         action="store_true",
+         help="Whether to print out the torch.distributed stack trace when something fails.",
+     )
+     parser.add_argument(
+         "training_script",
+         type=str,
+         help=(
+             "The full path to the script to be launched in parallel, followed by all the arguments for the training "
+             "script."
+         ),
+     )
+
+     # MPI arguments
+     mpirun_args = parser.add_argument_group("MPI Arguments", "Arguments related to mpirun for Multi-CPU")
+     mpirun_args.add_argument(
+         "--mpirun_hostfile",
+         type=str,
+         default=None,
+         help="Location for a hostfile for using Accelerate to launch a multi-CPU training job with mpirun. This will "
+         "get passed to the MPI --hostfile or -f parameter, depending on which MPI program is installed.",
+     )
+     mpirun_args.add_argument(
+         "--mpirun_ccl",
+         type=int,
+         default=1,
+         help="The number of oneCCL worker threads when using Accelerate to launch multi-CPU training with mpirun.",
+     )
+
+     # Other arguments of the training scripts
+     parser.add_argument("training_script_args", nargs=argparse.REMAINDER, help="Arguments of the training script.")
+
+     if subparsers is not None:
+         parser.set_defaults(func=launch_command)
+     return parser
+
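For orientation, this parser backs invocations such as `accelerate launch --multi_gpu --num_processes 2 --mixed_precision fp16 train.py --batch_size 16` (script name and trailing flags illustrative; everything after the script path lands in `training_script_args` via `argparse.REMAINDER`).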
+
+ def simple_launcher(args):
+     cmd, current_env = prepare_simple_launcher_cmd_env(args)
+
+     process = subprocess.Popen(cmd, env=current_env)
+     process.wait()
+     if process.returncode != 0:
+         if not args.quiet:
+             raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
+         else:
+             sys.exit(1)
+
+
+ def multi_gpu_launcher(args):
+     import torch.distributed.run as distrib_run
+
+     current_env = prepare_multi_gpu_env(args)
+     if not check_cuda_p2p_ib_support():
+         message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled."
+         warn = False
+         if "NCCL_P2P_DISABLE" not in current_env:
+             current_env["NCCL_P2P_DISABLE"] = "1"
+             warn = True
+         if "NCCL_IB_DISABLE" not in current_env:
+             current_env["NCCL_IB_DISABLE"] = "1"
+             warn = True
+         if warn:
+             logger.warning(message)
+
+     debug = getattr(args, "debug", False)
+     args = _filter_args(
+         args,
+         distrib_run.get_args_parser(),
+         ["--training_script", args.training_script, "--training_script_args", args.training_script_args],
+     )
+
+     with patch_environment(**current_env):
+         try:
+             distrib_run.run(args)
+         except Exception:
+             if is_rich_available() and debug:
+                 console = get_console()
+                 console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]")
+                 console.print_exception(suppress=[__file__], show_locals=False)
+             else:
+                 raise
+
+
+ def deepspeed_launcher(args):
+     import torch.distributed.run as distrib_run
+
+     if not is_deepspeed_available():
+         raise ImportError("DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.")
+     else:
+         from deepspeed.launcher.runner import DEEPSPEED_ENVIRONMENT_NAME
+
+     cmd, current_env = prepare_deepspeed_cmd_env(args)
+     if not check_cuda_p2p_ib_support():
+         message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled."
+         warn = False
+         if "NCCL_P2P_DISABLE" not in current_env:
+             current_env["NCCL_P2P_DISABLE"] = "1"
+             warn = True
+         if "NCCL_IB_DISABLE" not in current_env:
+             current_env["NCCL_IB_DISABLE"] = "1"
+             warn = True
+         if warn:
+             logger.warning(message)
+
+     if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
+         with open(DEEPSPEED_ENVIRONMENT_NAME, "a") as f:
+             valid_env_items = convert_dict_to_env_variables(current_env)
+             if len(valid_env_items) > 1:
+                 f.writelines(valid_env_items)
+
+         process = subprocess.Popen(cmd, env=current_env)
+         process.wait()
+         if process.returncode != 0:
+             if not args.quiet:
+                 raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
+             else:
+                 sys.exit(1)
+     else:
+         debug = getattr(args, "debug", False)
+         args = _filter_args(
+             args,
+             distrib_run.get_args_parser(),
+             ["--training_script", args.training_script, "--training_script_args", args.training_script_args],
+         )
+         with patch_environment(**current_env):
+             try:
+                 distrib_run.run(args)
+             except Exception:
+                 if is_rich_available() and debug:
+                     console = get_console()
+                     console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]")
+                     console.print_exception(suppress=[__file__], show_locals=False)
+                 else:
+                     raise
+
+
+ def tpu_launcher(args):
+     import torch_xla.distributed.xla_multiprocessing as xmp
+
+     if args.no_python:
+         raise ValueError("--no_python cannot be used with TPU launcher")
+
+     args, current_env = prepare_tpu(args, {})
+
+     if args.module:
+         mod_name = args.training_script
+     else:
+         # Import training_script as a module
+         script_path = Path(args.training_script)
+         sys.path.append(str(script_path.parent.resolve()))
+         mod_name = script_path.stem
+
+     mod = importlib.import_module(mod_name)
+     if not hasattr(mod, args.main_training_function):
+         raise ValueError(
+             f"Your training script should have a function named {args.main_training_function}, or you should pass a "
+             "different value to `--main_training_function`."
+         )
+
+     # Patch sys.argv
+     sys.argv = [mod.__file__] + args.training_script_args
+
+     main_function = getattr(mod, args.main_training_function)
+     with patch_environment(**current_env):
+         xmp.spawn(PrepareForLaunch(main_function), args=(), nprocs=args.num_processes)
+
+
+ def tpu_pod_launcher(args):
+     from torch_xla.distributed import xla_dist
+
+     current_env = {}
+     args, current_env = prepare_tpu(args, current_env, True)
+     debug = getattr(args, "debug", False)
+
+     training_script = args.training_script
+     training_script_args = args.training_script_args
+     new_args = _filter_args(
+         args, xla_dist.get_args_parser(), ["--tpu", args.tpu_name, "--positional", "", "--restart-tpuvm-pod-server"]
+     )
+
+     if args.tpu_use_sudo:
+         new_cmd = ["sudo"]
+     else:
+         new_cmd = []
+
+     new_cmd += [
+         "accelerate-launch",
+         "--tpu",
+         "--no_tpu_cluster",
+         "--num_machines",
+         "1",
+         "--mixed_precision",
+         "no",
+         "--dynamo_backend",
+         "no",
+         "--num_processes",
+         str(args.num_processes),
+         "--main_training_function",
+         str(args.main_training_function),
+         training_script,
+     ] + training_script_args
+
+     new_args.positional = new_cmd
+     bad_flags = ""
+     for arg in vars(new_args):
+         if arg.startswith("docker_"):
+             value = getattr(new_args, arg)
+             if value != "" and value is not None:
+                 bad_flags += f'{arg}="{value}"\n'
+     if bad_flags != "":
+         raise ValueError(
+             f"Docker containers are not supported for TPU pod launcher currently, please remove the following flags:\n{bad_flags}"
+         )
+     new_args.env = [f"{k}={v}" for k, v in current_env.items()]
+     new_args.env.append("ACCELERATE_IN_TPU_POD=1")
+     try:
+         xla_dist.resolve_and_execute(new_args)
+     except Exception:
+         if is_rich_available() and debug:
+             console = get_console()
+             console.print("\n[bold red]Using --debug, `torch_xla.xla_dist` Stack Trace:[/bold red]")
+             console.print_exception(suppress=[__file__], show_locals=False)
+         else:
+             raise
+
+
+ def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):
+     if not is_sagemaker_available():
+         raise ImportError(
+             "Please install sagemaker to be able to launch training on Amazon SageMaker with `pip install accelerate[sagemaker]`"
+         )
+     if args.module or args.no_python:
+         raise ValueError(
+             "SageMaker requires a python training script file and cannot be used with --module or --no_python"
+         )
+
+     from sagemaker.huggingface import HuggingFace
+
+     args, sagemaker_inputs = prepare_sagemager_args_inputs(sagemaker_config, args)
+
+     huggingface_estimator = HuggingFace(**args)
+
+     huggingface_estimator.fit(inputs=sagemaker_inputs)
+     print(f"You can find your model data at: {huggingface_estimator.model_data}")
+
+
+ def _validate_launch_command(args):
+     # Sanity checks
+     if sum([args.multi_gpu, args.cpu, args.tpu, args.use_deepspeed, args.use_fsdp]) > 1:
+         raise ValueError(
+             "You can only use one of `--cpu`, `--multi_gpu`, `--tpu`, `--use_deepspeed`, `--use_fsdp` at a time."
+         )
+     if args.multi_gpu and (args.num_processes is not None) and (args.num_processes < 2):
+         raise ValueError("You need to use at least 2 processes to use `--multi_gpu`.")
+
+     defaults = None
+     warned = []
+     mp_from_config_flag = False
+     # Get the default from the config file.
+     if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu:
+         defaults = load_config_from_file(args.config_file)
+         if (
+             not args.multi_gpu
+             and not args.tpu
+             and not args.tpu_use_cluster
+             and not args.use_deepspeed
+             and not args.use_fsdp
+             and not args.use_megatron_lm
+         ):
+             args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED
+             args.multi_gpu = (
+                 True
+                 if defaults.distributed_type
+                 in (
+                     DistributedType.MULTI_GPU,
+                     DistributedType.MULTI_NPU,
+                     DistributedType.MULTI_MLU,
+                     DistributedType.MULTI_XPU,
+                 )
+                 else False
+             )
+             args.tpu = defaults.distributed_type == DistributedType.XLA
+             args.use_fsdp = defaults.distributed_type == DistributedType.FSDP
+             args.use_megatron_lm = defaults.distributed_type == DistributedType.MEGATRON_LM
+             args.tpu_use_cluster = defaults.tpu_use_cluster if args.tpu else False
+         if args.gpu_ids is None:
+             if defaults.gpu_ids is not None:
+                 args.gpu_ids = defaults.gpu_ids
+             else:
+                 args.gpu_ids = "all"
+
+         if args.multi_gpu and args.num_machines is None:
+             args.num_machines = defaults.num_machines
+
+         if len(args.gpu_ids.split(",")) < 2 and (args.gpu_ids != "all") and args.multi_gpu and args.num_machines <= 1:
+             raise ValueError(
+                 "Less than two GPU ids were configured and tried to run on multiple GPUs. "
+                 "Please ensure at least two are specified for `--gpu_ids`, or use `--gpu_ids='all'`."
+             )
+         if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE:
+             # Update args with the defaults
+             for name, attr in defaults.__dict__.items():
+                 if isinstance(attr, dict):
+                     for k in defaults.deepspeed_config:
+                         setattr(args, k, defaults.deepspeed_config[k])
+                     for k in defaults.fsdp_config:
+                         arg_to_set = k
+                         if "fsdp" not in arg_to_set:
+                             arg_to_set = "fsdp_" + arg_to_set
+                         setattr(args, arg_to_set, defaults.fsdp_config[k])
+                     for k in defaults.megatron_lm_config:
+                         setattr(args, k, defaults.megatron_lm_config[k])
+                     for k in defaults.dynamo_config:
+                         setattr(args, k, defaults.dynamo_config[k])
+                     for k in defaults.ipex_config:
+                         setattr(args, k, defaults.ipex_config[k])
+                     for k in defaults.mpirun_config:
+                         setattr(args, k, defaults.mpirun_config[k])
+                     continue
+
+                 # Those args are handled separately
+                 if (
+                     name not in ["compute_environment", "mixed_precision", "distributed_type"]
+                     and getattr(args, name, None) is None
+                 ):
+                     setattr(args, name, attr)
+         if not args.debug:
+             args.debug = defaults.debug
+
+         if not args.mixed_precision:
+             if defaults.mixed_precision is None:
+                 args.mixed_precision = "no"
+             else:
+                 args.mixed_precision = defaults.mixed_precision
+                 mp_from_config_flag = True
+         else:
+             if args.use_cpu or (args.use_xpu and torch.xpu.is_available()):
+                 native_amp = is_torch_version(">=", "1.10")
+             else:
+                 native_amp = is_bf16_available(True)
+             if (
+                 args.mixed_precision == "bf16"
+                 and not native_amp
+                 and not (args.tpu and is_torch_xla_available(check_is_tpu=True))
+             ):
+                 raise ValueError("bf16 mixed precision requires PyTorch >= 1.10 and a supported device.")
+
+         # Silently set the default here
+         if args.dynamo_backend is None:
+             args.dynamo_backend = "no"
+     else:
+         if args.num_processes is None:
+             if args.use_xpu and is_xpu_available():
+                 args.num_processes = torch.xpu.device_count()
+             elif is_mlu_available():
+                 args.num_processes = torch.mlu.device_count()
+             elif is_npu_available():
+                 args.num_processes = torch.npu.device_count()
+             else:
+                 args.num_processes = torch.cuda.device_count()
+             warned.append(f"\t`--num_processes` was set to a value of `{args.num_processes}`")
+         if args.debug is None:
+             args.debug = False
+         if not args.multi_gpu and (
+             (args.use_xpu and is_xpu_available() and torch.xpu.device_count() > 1)
+             or (is_mlu_available() and torch.mlu.device_count() > 1)
+             or (is_npu_available() and torch.npu.device_count() > 1)
+             or (torch.cuda.device_count() > 1)
+         ):
+             warned.append(
+                 "\t\tMore than one GPU was found, enabling multi-GPU training.\n"
+                 "\t\tIf this was unintended please pass in `--num_processes=1`."
+             )
+             args.multi_gpu = True
+         if args.num_machines is None:
+             warned.append("\t`--num_machines` was set to a value of `1`")
+             args.num_machines = 1
+         if args.mixed_precision is None:
+             warned.append("\t`--mixed_precision` was set to a value of `'no'`")
+             args.mixed_precision = "no"
+         if not hasattr(args, "use_cpu"):
+             args.use_cpu = args.cpu
+         if args.dynamo_backend is None:
+             warned.append("\t`--dynamo_backend` was set to a value of `'no'`")
+             args.dynamo_backend = "no"
+     if args.debug:
+         logger.debug("Running script in debug mode, expect distributed operations to be slightly slower.")
+
+     is_aws_env_disabled = defaults is None or (
+         defaults is not None and defaults.compute_environment != ComputeEnvironment.AMAZON_SAGEMAKER
+     )
+     if is_aws_env_disabled and args.num_cpu_threads_per_process is None:
+         args.num_cpu_threads_per_process = 1
+         if args.use_cpu and args.num_processes >= 1:
+             local_size = get_int_from_env(
+                 ["MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1
+             )
+             threads_per_process = int(psutil.cpu_count(logical=False) / local_size)
+             if threads_per_process > 1:
+                 args.num_cpu_threads_per_process = threads_per_process
+                 warned.append(
+                     f"\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance when training on CPUs"
+                 )
+
+     if any(warned):
+         message = "The following values were not passed to `accelerate launch` and had defaults used instead:\n"
+         message += "\n".join(warned)
+         message += (
+             "\nTo avoid this warning pass in values for each of the problematic parameters or run `accelerate config`."
+         )
+         logger.warning(message)
+     return args, defaults, mp_from_config_flag
+
+
+ def launch_command(args):
+     args, defaults, mp_from_config_flag = _validate_launch_command(args)
+     # Use the proper launcher
+     if args.use_deepspeed and not args.cpu:
+         args.deepspeed_fields_from_accelerate_config = list(defaults.deepspeed_config.keys()) if defaults else []
+         if mp_from_config_flag:
+             args.deepspeed_fields_from_accelerate_config.append("mixed_precision")
+         args.deepspeed_fields_from_accelerate_config = ",".join(args.deepspeed_fields_from_accelerate_config)
+         deepspeed_launcher(args)
+     elif args.use_fsdp and not args.cpu:
+         multi_gpu_launcher(args)
+     elif args.use_megatron_lm and not args.cpu:
+         multi_gpu_launcher(args)
+     elif args.multi_gpu and not args.cpu:
+         multi_gpu_launcher(args)
+     elif args.tpu and not args.cpu:
+         if args.tpu_use_cluster:
+             tpu_pod_launcher(args)
+         else:
+             tpu_launcher(args)
+     elif defaults is not None and defaults.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
+         sagemaker_launcher(defaults, args)
+     else:
+         simple_launcher(args)
+
+
+ def main():
+     parser = launch_command_parser()
+     args = parser.parse_args()
+     launch_command(args)
+
+
+ if __name__ == "__main__":
+     main()
env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__init__.py ADDED
@@ -0,0 +1,14 @@
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ from .selection_menu import BulletMenu
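The re-exported `BulletMenu` is the arrow-key picker behind `accelerate config`. A minimal sketch of the expected call pattern, assuming `run()` returns the index of the selected choice as implemented in the bundled `selection_menu` module (prompt and options illustrative):

    from accelerate.commands.menu import BulletMenu

    index = BulletMenu(
        "Which compute environment are you using?", ["This machine", "AWS (Amazon SageMaker)"]
    ).run(default_choice=0)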
env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (238 Bytes)
env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/cursor.cpython-310.pyc ADDED
Binary file (1.43 kB)
env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/helpers.cpython-310.pyc ADDED
Binary file (1.65 kB)
env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/input.cpython-310.pyc ADDED
Binary file (2.38 kB)
env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/keymap.cpython-310.pyc ADDED
Binary file (2.39 kB)
env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/selection_menu.cpython-310.pyc ADDED
Binary file (4.43 kB)
env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/cursor.py ADDED
@@ -0,0 +1,65 @@
+ # Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ A utility for showing and hiding the terminal cursor on Windows and Linux, based on https://github.com/bchao1/bullet
+ """
+
+ import os
+ import sys
+ from contextlib import contextmanager
+
+
+ # Windows only
+ if os.name == "nt":
+     import ctypes
+     import msvcrt  # noqa
+
+     class CursorInfo(ctypes.Structure):
+         # _fields is a specific attr expected by ctypes
+         _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
+
+
+ def hide_cursor():
+     if os.name == "nt":
+         ci = CursorInfo()
+         handle = ctypes.windll.kernel32.GetStdHandle(-11)
+         ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
+         ci.visible = False
+         ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
+     elif os.name == "posix":
+         sys.stdout.write("\033[?25l")
+         sys.stdout.flush()
+
+
+ def show_cursor():
+     if os.name == "nt":
+         ci = CursorInfo()
+         handle = ctypes.windll.kernel32.GetStdHandle(-11)
+         ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
+         ci.visible = True
+         ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
+     elif os.name == "posix":
+         sys.stdout.write("\033[?25h")
+         sys.stdout.flush()
+
+
+ @contextmanager
+ def hide():
+     "Context manager to hide the terminal cursor"
+     try:
+         hide_cursor()
+         yield
+     finally:
+         show_cursor()
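Since `hide` is a context manager, callers wrap any menu drawing in it so the cursor is restored even on error. A sketch (`draw_menu` is a hypothetical placeholder):

    from accelerate.commands.menu.cursor import hide

    with hide():
        draw_menu()  # hypothetical: any terminal drawing code goes here
    # show_cursor() has run by this point, even if draw_menu() raised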
env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/helpers.py ADDED
@@ -0,0 +1,59 @@
1
+ # Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """
16
+ A variety of helper functions and constants when dealing with terminal menu choices, based on
17
+ https://github.com/bchao1/bullet
18
+ """
19
+
20
+ import enum
21
+ import shutil
22
+ import sys
23
+
24
+
25
+ TERMINAL_WIDTH, _ = shutil.get_terminal_size()
26
+
27
+ CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}
28
+
29
+
30
+ class Direction(enum.Enum):
31
+ UP = 0
32
+ DOWN = 1
33
+
34
+
35
+ def forceWrite(content, end=""):
36
+ sys.stdout.write(str(content) + end)
37
+ sys.stdout.flush()
38
+
39
+
40
+ def writeColor(content, color, end=""):
41
+ forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)
42
+
43
+
44
+ def reset_cursor():
45
+ forceWrite("\r")
46
+
47
+
48
+ def move_cursor(num_lines: int, direction: str):
49
+ forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")
50
+
51
+
52
+ def clear_line():
53
+ forceWrite(" " * TERMINAL_WIDTH)
54
+ reset_cursor()
55
+
56
+
57
+ def linebreak():
58
+ reset_cursor()
59
+ forceWrite("-" * TERMINAL_WIDTH)
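
A short sketch combining these helpers (not part of this commit). `32` is the ANSI color code for green that `writeColor` wraps in an escape sequence; the other calls show the redraw-in-place pattern the menu relies on.

from accelerate.commands.menu.helpers import clear_line, forceWrite, reset_cursor, writeColor

writeColor("downloading...", 32)  # green text, no trailing newline
reset_cursor()                    # carriage return back to column 0
clear_line()                      # overwrite the whole line with spaces
forceWrite("done", end="\n")      # redraw the line in place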
env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/input.py ADDED
@@ -0,0 +1,86 @@
+ # Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ This file contains utilities for handling input from the user and registering specific keys to specific functions,
+ based on https://github.com/bchao1/bullet
+ """
+
+ from typing import List
+
+ from .keymap import KEYMAP, get_character
+
+
+ def mark(key: str):
+     """
+     Mark the function with the key code so it can be handled in the register
+     """
+
+     def decorator(func):
+         handle = getattr(func, "handle_key", [])
+         handle += [key]
+         func.handle_key = handle
+         return func
+
+     return decorator
+
+
+ def mark_multiple(*keys: List[str]):
+     """
+     Mark the function with the key codes so it can be handled in the register
+     """
+
+     def decorator(func):
+         handle = getattr(func, "handle_key", [])
+         handle += keys
+         func.handle_key = handle
+         return func
+
+     return decorator
+
+
+ class KeyHandler(type):
+     """
+     Metaclass that adds the key handlers to the class
+     """
+
+     def __new__(cls, name, bases, attrs):
+         new_cls = super().__new__(cls, name, bases, attrs)
+         if not hasattr(new_cls, "key_handler"):
+             new_cls.key_handler = {}
+         new_cls.handle_input = KeyHandler.handle_input
+
+         for value in attrs.values():
+             handled_keys = getattr(value, "handle_key", [])
+             for key in handled_keys:
+                 new_cls.key_handler[key] = value
+         return new_cls
+
+     @staticmethod
+     def handle_input(cls):
+         "Finds and returns the selected character if it exists in the handler"
+         char = get_character()
+         if char != KEYMAP["undefined"]:
+             char = ord(char)
+         handler = cls.key_handler.get(char)
+         if handler:
+             cls.current_selection = char
+             return handler(cls)
+         else:
+             return None
+
+
+ def register(cls):
+     """Adds KeyHandler metaclass to the class"""
+     return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
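
A hedged sketch of the decorator-plus-metaclass plumbing (not part of this commit): `TinyPrompt` is a hypothetical class, not part of accelerate. `register` rebuilds it with the `KeyHandler` metaclass, so the marked methods are dispatched by `handle_input`.

from accelerate.commands.menu import input
from accelerate.commands.menu.keymap import KEYMAP


@input.register
class TinyPrompt:
    @input.mark(KEYMAP["newline"])
    def accept(self):
        return "accepted"

    @input.mark(KEYMAP["interrupt"])
    def cancel(self):
        raise KeyboardInterrupt


# Blocks for one keypress; returns "accepted" on Enter, None for unmarked keys.
print(TinyPrompt().handle_input())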
env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/keymap.py ADDED
@@ -0,0 +1,133 @@
+ # Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ Utilities relating to parsing raw characters from the keyboard, based on https://github.com/bchao1/bullet
+ """
+
+ import os
+ import string
+ import sys
+
+
+ ARROW_KEY_FLAG = 1 << 8
+
+ KEYMAP = {
+     "tab": ord("\t"),
+     "newline": ord("\r"),
+     "esc": 27,
+     "up": 65 + ARROW_KEY_FLAG,
+     "down": 66 + ARROW_KEY_FLAG,
+     "right": 67 + ARROW_KEY_FLAG,
+     "left": 68 + ARROW_KEY_FLAG,
+     "mod_int": 91,
+     "undefined": sys.maxsize,
+     "interrupt": 3,
+     "insert": 50,
+     "delete": 51,
+     "pg_up": 53,
+     "pg_down": 54,
+ }
+
+ KEYMAP["arrow_begin"] = KEYMAP["up"]
+ KEYMAP["arrow_end"] = KEYMAP["left"]
+
+ if sys.platform == "win32":
+     WIN_CH_BUFFER = []
+     WIN_KEYMAP = {
+         b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
+         b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
+         b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
+         b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
+         b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
+         b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
+         b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
+         b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
+     }
+
+ for i in range(10):
+     KEYMAP[str(i)] = ord(str(i))
+
+
+ def get_raw_chars():
+     "Gets raw characters from inputs"
+     if os.name == "nt":
+         import msvcrt
+
+         encoding = "mbcs"
+         # Flush the keyboard buffer
+         while msvcrt.kbhit():
+             msvcrt.getch()
+         if len(WIN_CH_BUFFER) == 0:
+             # Read the keystroke
+             ch = msvcrt.getch()
+
+             # If it is a prefix char, get second part
+             if ch in (b"\x00", b"\xe0"):
+                 ch2 = ch + msvcrt.getch()
+                 # Translate actual Win chars to bullet char types
+                 try:
+                     chx = chr(WIN_KEYMAP[ch2])
+                     WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
+                     WIN_CH_BUFFER.append(chx)
+                     if ord(chx) in (
+                         KEYMAP["insert"] - 1 << 9,
+                         KEYMAP["delete"] - 1 << 9,
+                         KEYMAP["pg_up"] - 1 << 9,
+                         KEYMAP["pg_down"] - 1 << 9,
+                     ):
+                         WIN_CH_BUFFER.append(chr(126))
+                     ch = chr(KEYMAP["esc"])
+                 except KeyError:
+                     ch = ch2[1]
+             else:
+                 ch = ch.decode(encoding)
+         else:
+             ch = WIN_CH_BUFFER.pop(0)
+     elif os.name == "posix":
+         import termios
+         import tty
+
+         fd = sys.stdin.fileno()
+         old_settings = termios.tcgetattr(fd)
+         try:
+             tty.setraw(fd)
+             ch = sys.stdin.read(1)
+         finally:
+             termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+     return ch
+
+
+ def get_character():
+     "Gets a character from the keyboard and returns the key code"
+     char = get_raw_chars()
+     if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
+         return char
+
+     elif ord(char) == KEYMAP["esc"]:
+         combo = get_raw_chars()
+         if ord(combo) == KEYMAP["mod_int"]:
+             key = get_raw_chars()
+             if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
+                 return chr(ord(key) + ARROW_KEY_FLAG)
+             else:
+                 return KEYMAP["undefined"]
+         else:
+             return get_raw_chars()
+
+     else:
+         if char in string.printable:
+             return char
+         else:
+             return KEYMAP["undefined"]
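
A sketch of how arrow keys come back from `get_character` (not part of this commit): multi-byte escape sequences are collapsed into a single character whose code carries `ARROW_KEY_FLAG` (1 << 8), so they cannot collide with printable characters.

from accelerate.commands.menu.keymap import ARROW_KEY_FLAG, KEYMAP, get_character

key = get_character()  # press the up arrow here
# `get_character` can also return the int KEYMAP["undefined"], hence the type check.
if isinstance(key, str) and ord(key) == KEYMAP["up"]:
    print("up arrow, raw escape code", ord(key) - ARROW_KEY_FLAG)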
env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/selection_menu.py ADDED
@@ -0,0 +1,144 @@
+ # Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ Main driver for the selection menu, based on https://github.com/bchao1/bullet
+ """
+
+ import builtins
+ import sys
+
+ from ...utils.imports import _is_package_available
+ from . import cursor, input
+ from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
+ from .keymap import KEYMAP
+
+
+ in_colab = False
+ try:
+     in_colab = _is_package_available("google.colab")
+ except ModuleNotFoundError:
+     pass
+
+
+ @input.register
+ class BulletMenu:
+     """
+     A CLI menu to select a choice from a list of choices using the keyboard.
+     """
+
+     def __init__(self, prompt: str = None, choices: list = []):
+         self.position = 0
+         self.choices = choices
+         self.prompt = prompt
+         if sys.platform == "win32":
+             self.arrow_char = "*"
+         else:
+             self.arrow_char = "➔ "
+
+     def write_choice(self, index, end: str = ""):
+         if sys.platform != "win32":
+             writeColor(self.choices[index], 32, end)
+         else:
+             forceWrite(self.choices[index], end)
+
+     def print_choice(self, index: int):
+         "Prints the choice at the given index"
+         if index == self.position:
+             forceWrite(f" {self.arrow_char} ")
+             self.write_choice(index)
+         else:
+             forceWrite(f"    {self.choices[index]}")
+         reset_cursor()
+
+     def move_direction(self, direction: Direction, num_spaces: int = 1):
+         "Should not be directly called, used to move a direction of either up or down"
+         old_position = self.position
+         if direction == Direction.DOWN:
+             if self.position + 1 >= len(self.choices):
+                 return
+             self.position += num_spaces
+         else:
+             if self.position - 1 < 0:
+                 return
+             self.position -= num_spaces
+         clear_line()
+         self.print_choice(old_position)
+         move_cursor(num_spaces, direction.name)
+         self.print_choice(self.position)
+
+     @input.mark(KEYMAP["up"])
+     def move_up(self):
+         self.move_direction(Direction.UP)
+
+     @input.mark(KEYMAP["down"])
+     def move_down(self):
+         self.move_direction(Direction.DOWN)
+
+     @input.mark(KEYMAP["newline"])
+     def select(self):
+         move_cursor(len(self.choices) - self.position, "DOWN")
+         return self.position
+
+     @input.mark(KEYMAP["interrupt"])
+     def interrupt(self):
+         move_cursor(len(self.choices) - self.position, "DOWN")
+         raise KeyboardInterrupt
+
+     @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
+     def select_row(self):
+         index = int(chr(self.current_selection))
+         movement = index - self.position
+         if index == self.position:
+             return
+         if index < len(self.choices):
+             if self.position > index:
+                 self.move_direction(Direction.UP, -movement)
+             elif self.position < index:
+                 self.move_direction(Direction.DOWN, movement)
+             else:
+                 return
+         else:
+             return
+
+     def run(self, default_choice: int = 0):
+         "Start the menu and return the selected choice"
+         if self.prompt:
+             linebreak()
+             forceWrite(self.prompt, "\n")
+             if in_colab:
+                 forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
+             else:
+                 forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
+         self.position = default_choice
+         for i in range(len(self.choices)):
+             self.print_choice(i)
+             forceWrite("\n")
+         move_cursor(len(self.choices) - self.position, "UP")
+         with cursor.hide():
+             while True:
+                 if in_colab:
+                     try:
+                         choice = int(builtins.input())
+                     except ValueError:
+                         choice = default_choice
+                 else:
+                     choice = self.handle_input()
+                 if choice is not None:
+                     reset_cursor()
+                     for _ in range(len(self.choices) + 1):
+                         move_cursor(1, "UP")
+                         clear_line()
+                     self.write_choice(choice, "\n")
+                     return choice
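
A minimal sketch of driving the menu from a terminal (not part of this commit); the prompt and choices here are illustrative. `run` returns the index of the selected choice.

from accelerate.commands.menu.selection_menu import BulletMenu

menu = BulletMenu("Which compute environment are you using?", ["This machine", "AWS (Amazon SageMaker)"])
index = menu.run(default_choice=0)  # arrow/number keys to move, Enter to select
print("selected:", menu.choices[index])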
env-llmeval/lib/python3.10/site-packages/accelerate/commands/test.py ADDED
@@ -0,0 +1,65 @@
+ #!/usr/bin/env python
+
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import argparse
+
+ from accelerate.test_utils import execute_subprocess_async, path_in_accelerate_package
+
+
+ def test_command_parser(subparsers=None):
+     if subparsers is not None:
+         parser = subparsers.add_parser("test")
+     else:
+         parser = argparse.ArgumentParser("Accelerate test command")
+
+     parser.add_argument(
+         "--config_file",
+         default=None,
+         help=(
+             "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
+             "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
+             "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
+             "with 'huggingface'."
+         ),
+     )
+
+     if subparsers is not None:
+         parser.set_defaults(func=test_command)
+     return parser
+
+
+ def test_command(args):
+     script_name = path_in_accelerate_package("test_utils", "scripts", "test_script.py")
+
+     if args.config_file is None:
+         test_args = [script_name]
+     else:
+         test_args = f"--config_file={args.config_file} {script_name}".split()
+
+     cmd = ["accelerate-launch"] + test_args
+     result = execute_subprocess_async(cmd)
+     if result.returncode == 0:
+         print("Test is a success! You are ready for your distributed training!")
+
+
+ def main():
+     parser = test_command_parser()
+     args = parser.parse_args()
+     test_command(args)
+
+
+ if __name__ == "__main__":
+     main()
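
A sketch equivalent to running `accelerate test` from the shell (not part of this commit); with no `--config_file`, the default config location is used. It launches the bundled sanity-check script through `accelerate-launch`, so it assumes a working accelerate installation.

from accelerate.commands.test import test_command, test_command_parser

parser = test_command_parser()
args = parser.parse_args([])  # or ["--config_file", "/path/to/config.yaml"]
test_command(args)            # runs test_script.py via accelerate-launch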
env-llmeval/lib/python3.10/site-packages/accelerate/commands/tpu.py ADDED
@@ -0,0 +1,157 @@
+ #!/usr/bin/env python
+
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import argparse
+ import os
+ import subprocess
+
+ from packaging.version import Version, parse
+
+ from accelerate.commands.config.config_args import default_config_file, load_config_from_file
+
+
+ _description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
+
+
+ def tpu_command_parser(subparsers=None):
+     if subparsers is not None:
+         parser = subparsers.add_parser("tpu-config", description=_description)
+     else:
+         parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
+     # Core arguments
+     config_args = parser.add_argument_group(
+         "Config Arguments", "Arguments that can be configured through `accelerate config`."
+     )
+     config_args.add_argument(
+         "--config_file",
+         type=str,
+         default=None,
+         help="Path to the config file to use for accelerate.",
+     )
+     config_args.add_argument(
+         "--tpu_name",
+         default=None,
+         help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
+     )
+     config_args.add_argument(
+         "--tpu_zone",
+         default=None,
+         help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
+     )
+     pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options run inside the TPU.")
+     pod_args.add_argument(
+         "--use_alpha",
+         action="store_true",
+         help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
+     )
+     pod_args.add_argument(
+         "--command_file",
+         default=None,
+         help="The path to the file containing the commands to run on the pod on startup.",
+     )
+     pod_args.add_argument(
+         "--command",
+         action="append",
+         nargs="+",
+         help="A command to run on the pod. Can be passed multiple times.",
+     )
+     pod_args.add_argument(
+         "--install_accelerate",
+         action="store_true",
+         help="Whether to install accelerate on the pod. Defaults to False.",
+     )
+     pod_args.add_argument(
+         "--accelerate_version",
+         default="latest",
+         help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
+     )
+     pod_args.add_argument(
+         "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
+     )
+
+     if subparsers is not None:
+         parser.set_defaults(func=tpu_command_launcher)
+     return parser
+
+
+ def tpu_command_launcher(args):
+     defaults = None
+
+     # Get the default from the config file if it exists.
+     if args.config_file is not None or os.path.isfile(default_config_file):
+         defaults = load_config_from_file(args.config_file)
+         if not args.command_file and defaults.command_file is not None and not args.command:
+             args.command_file = defaults.command_file
+         if not args.command and defaults.commands is not None:
+             args.command = defaults.commands
+         if not args.tpu_name:
+             args.tpu_name = defaults.tpu_name
+         if not args.tpu_zone:
+             args.tpu_zone = defaults.tpu_zone
+     if args.accelerate_version == "dev":
+         args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
+     elif args.accelerate_version == "latest":
+         args.accelerate_version = "accelerate -U"
+     elif isinstance(parse(args.accelerate_version), Version):
+         args.accelerate_version = f"accelerate=={args.accelerate_version}"
+
+     if not args.command_file and not args.command:
+         raise ValueError("You must specify either a command file or a command to run on the pod.")
+
+     if args.command_file:
+         with open(args.command_file) as f:
+             args.command = [f.read().splitlines()]
+
+     # To turn list of lists into list of strings
+     if isinstance(args.command[0], list):
+         args.command = [line for cmd in args.command for line in cmd]
+     # Default to the shared folder and install accelerate
+     new_cmd = ["cd /usr/share"]
+     if args.install_accelerate:
+         new_cmd += [f"pip install {args.accelerate_version}"]
+     new_cmd += args.command
+     args.command = "; ".join(new_cmd)
+
+     # Then send it to gcloud
+     # Eventually try to use google-api-core to do this instead of subprocess
+     cmd = ["gcloud"]
+     if args.use_alpha:
+         cmd += ["alpha"]
+     cmd += [
+         "compute",
+         "tpus",
+         "tpu-vm",
+         "ssh",
+         args.tpu_name,
+         "--zone",
+         args.tpu_zone,
+         "--command",
+         args.command,
+         "--worker",
+         "all",
+     ]
+     if args.debug:
+         print(f"Running {' '.join(cmd)}")
+         return
+     subprocess.run(cmd)
+     print("Successfully setup pod.")
+
+
+ def main():
+     parser = tpu_command_parser()
+     args = parser.parse_args()
+
+     tpu_command_launcher(args)
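
A dry-run sketch (not part of this commit), assuming no saved accelerate config file; the TPU name and zone are placeholders. With `--debug`, the assembled `gcloud compute tpus tpu-vm ssh ...` command is printed instead of executed.

from accelerate.commands.tpu import tpu_command_launcher, tpu_command_parser

parser = tpu_command_parser()
args = parser.parse_args(
    ["--tpu_name", "my-tpu", "--tpu_zone", "us-central1-a", "--command", "echo hello", "--debug"]
)
# Prints something like:
# Running gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-a --command cd /usr/share; echo hello --worker all
tpu_command_launcher(args)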
env-llmeval/lib/python3.10/site-packages/accelerate/commands/utils.py ADDED
@@ -0,0 +1,120 @@
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import argparse
+
+
+ class _StoreAction(argparse.Action):
+     """
+     Custom action that allows for `-` or `_` to be passed in for an argument.
+     """
+
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+         new_option_strings = []
+         for option_string in self.option_strings:
+             new_option_strings.append(option_string)
+             if "_" in option_string[2:]:
+                 # Add `-` version to the option string
+                 new_option_strings.append(option_string.replace("_", "-"))
+         self.option_strings = new_option_strings
+
+     def __call__(self, parser, namespace, values, option_string=None):
+         setattr(namespace, self.dest, values)
+
+
+ class _StoreConstAction(_StoreAction):
+     """
+     Same as `argparse._StoreConstAction` but uses the custom `_StoreAction`.
+     """
+
+     def __init__(self, option_strings, dest, const, default=None, required=False, help=None):
+         super().__init__(
+             option_strings=option_strings,
+             dest=dest,
+             nargs=0,
+             const=const,
+             default=default,
+             required=required,
+             help=help,
+         )
+
+     def __call__(self, parser, namespace, values, option_string=None):
+         setattr(namespace, self.dest, self.const)
+
+
+ class _StoreTrueAction(_StoreConstAction):
+     """
+     Same as `argparse._StoreTrueAction` but uses the custom `_StoreConstAction`.
+     """
+
+     def __init__(
+         self,
+         option_strings,
+         dest,
+         default=None,
+         required=False,
+         help=None,
+     ):
+         super().__init__(
+             option_strings=option_strings, dest=dest, const=True, default=default, required=required, help=help
+         )
+
+
+ class CustomArgumentGroup(argparse._ArgumentGroup):
+     """
+     Custom argument group that allows for the use of `-` or `_` in arguments passed and overrides the help for each
+     when applicable.
+     """
+
+     def _add_action(self, action):
+         args = vars(action)
+         if isinstance(action, argparse._StoreTrueAction):
+             action = _StoreTrueAction(
+                 args["option_strings"], args["dest"], args["default"], args["required"], args["help"]
+             )
+         elif isinstance(action, argparse._StoreConstAction):
+             action = _StoreConstAction(
+                 args["option_strings"],
+                 args["dest"],
+                 args["const"],
+                 args["default"],
+                 args["required"],
+                 args["help"],
+             )
+         elif isinstance(action, argparse._StoreAction):
+             action = _StoreAction(**args)
+         action = super()._add_action(action)
+         return action
+
+
+ class CustomArgumentParser(argparse.ArgumentParser):
+     """
+     Custom argument parser that allows for the use of `-` or `_` in arguments passed and overrides the help for each
+     when applicable.
+     """
+
+     def add_argument(self, *args, **kwargs):
+         if "action" in kwargs:
+             # Translate action -> class
+             if kwargs["action"] == "store_true":
+                 kwargs["action"] = _StoreTrueAction
+         else:
+             kwargs["action"] = _StoreAction
+         super().add_argument(*args, **kwargs)
+
+     def add_argument_group(self, *args, **kwargs):
+         group = CustomArgumentGroup(self, *args, **kwargs)
+         self._action_groups.append(group)
+         return group
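
A sketch of the underscore/dash aliasing (not part of this commit): `--num_machines` and `--use_cpu` are illustrative flags, not accelerate's own. Declaring them with underscores also registers the dashed spellings.

from accelerate.commands.utils import CustomArgumentParser

parser = CustomArgumentParser()
parser.add_argument("--num_machines", type=int, default=1)
parser.add_argument("--use_cpu", action="store_true")

# Both spellings parse to the same destinations.
args = parser.parse_args(["--num-machines", "2", "--use-cpu"])
print(args.num_machines, args.use_cpu)  # 2 True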
env-llmeval/lib/python3.10/site-packages/accelerate/data_loader.py ADDED
@@ -0,0 +1,1093 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import math
16
+ from contextlib import suppress
17
+ from typing import Callable, List, Optional, Union
18
+
19
+ import torch
20
+ from torch.utils.data import BatchSampler, DataLoader, IterableDataset, RandomSampler
21
+
22
+ from .logging import get_logger
23
+ from .state import AcceleratorState, DistributedType, GradientState, is_torch_xla_available
24
+ from .utils import (
25
+ RNGType,
26
+ broadcast,
27
+ broadcast_object_list,
28
+ concatenate,
29
+ find_batch_size,
30
+ get_data_structure,
31
+ initialize_tensors,
32
+ is_torch_version,
33
+ send_to_device,
34
+ slice_tensors,
35
+ synchronize_rng_states,
36
+ )
37
+
38
+
39
+ logger = get_logger(__name__)
40
+
41
+ # kwargs of the DataLoader in min version 1.4.0.
42
+ _PYTORCH_DATALOADER_KWARGS = {
43
+ "batch_size": 1,
44
+ "shuffle": False,
45
+ "sampler": None,
46
+ "batch_sampler": None,
47
+ "num_workers": 0,
48
+ "collate_fn": None,
49
+ "pin_memory": False,
50
+ "drop_last": False,
51
+ "timeout": 0,
52
+ "worker_init_fn": None,
53
+ "multiprocessing_context": None,
54
+ "generator": None,
55
+ "prefetch_factor": 2,
56
+ "persistent_workers": False,
57
+ }
58
+
59
+ # kwargs added after by version
60
+ _PYTORCH_DATALOADER_ADDITIONAL_KWARGS = {}
61
+
62
+ for v, additional_kwargs in _PYTORCH_DATALOADER_ADDITIONAL_KWARGS.items():
63
+ if is_torch_version(">=", v):
64
+ _PYTORCH_DATALOADER_KWARGS.update(additional_kwargs)
65
+
66
+
67
+ class SeedableRandomSampler(RandomSampler):
68
+ """
69
+ Same as a random sampler, except that in `__iter__` a seed can be used.
70
+
71
+ Needed specifically in distributed cases, when the random generator for each GPU needs to start from the same seed
72
+ and be fully reproducable on multiple iterations.
73
+
74
+ If a custom `generator` is passed, it will rely on its initial seed as well as the current iteration it is on
75
+ (stored in `self.epoch`).
76
+ """
77
+
78
+ def __init__(self, *args, **kwargs):
79
+ super().__init__(*args, **kwargs)
80
+ self.epoch = 0
81
+ self.initial_seed = torch.random.initial_seed()
82
+
83
+ def __iter__(self):
84
+ if self.generator is None:
85
+ self.generator = torch.Generator()
86
+ self.generator.manual_seed(self.initial_seed)
87
+
88
+ # Allow `self.epoch` to modify the seed of the generator
89
+ seed = self.epoch + self.initial_seed
90
+ # print("Setting seed at epoch", self.epoch, seed)
91
+ self.generator.manual_seed(seed)
92
+ yield from super().__iter__()
93
+ self.set_epoch(self.epoch + 1)
94
+
95
+ def set_epoch(self, epoch: int):
96
+ "Sets the current iteration of the sampler."
97
+ self.epoch = epoch
98
+
99
+
100
+ class BatchSamplerShard(BatchSampler):
101
+ """
102
+ Wraps a PyTorch `BatchSampler` to generate batches for one of the processes only. Instances of this class will
103
+ always yield a number of batches that is a round multiple of `num_processes` and that all have the same size.
104
+ Depending on the value of the `drop_last` attribute of the batch sampler passed, it will either stop the iteration
105
+ at the first batch that would be too small / not present on all processes or loop with indices from the beginning.
106
+
107
+ Args:
108
+ batch_sampler (`torch.utils.data.sampler.BatchSampler`):
109
+ The batch sampler to split in several shards.
110
+ num_processes (`int`, *optional*, defaults to 1):
111
+ The number of processes running concurrently.
112
+ process_index (`int`, *optional*, defaults to 0):
113
+ The index of the current process.
114
+ split_batches (`bool`, *optional*, defaults to `False`):
115
+ Whether the shards should be created by splitting a batch to give a piece of it on each process, or by
116
+ yielding different full batches on each process.
117
+
118
+ On two processes with a sampler of `[[0, 1, 2, 3], [4, 5, 6, 7]]`, this will result in:
119
+
120
+ - the sampler on process 0 to yield `[0, 1, 2, 3]` and the sampler on process 1 to yield `[4, 5, 6, 7]` if
121
+ this argument is set to `False`.
122
+ - the sampler on process 0 to yield `[0, 1]` then `[4, 5]` and the sampler on process 1 to yield `[2, 3]`
123
+ then `[6, 7]` if this argument is set to `True`.
124
+ even_batches (`bool`, *optional*, defaults to `True`):
125
+ Whether or not to loop back at the beginning of the sampler when the number of samples is not a round
126
+ multiple of (original batch size / number of processes).
127
+
128
+ <Tip warning={true}>
129
+
130
+ `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches`
131
+ equal to `False`
132
+
133
+ </Tip>"""
134
+
135
+ def __init__(
136
+ self,
137
+ batch_sampler: BatchSampler,
138
+ num_processes: int = 1,
139
+ process_index: int = 0,
140
+ split_batches: bool = False,
141
+ even_batches: bool = True,
142
+ ):
143
+ if split_batches and batch_sampler.batch_size % num_processes != 0:
144
+ raise ValueError(
145
+ f"To use `BatchSamplerShard` in `split_batches` mode, the batch size ({batch_sampler.batch_size}) "
146
+ f"needs to be a round multiple of the number of processes ({num_processes})."
147
+ )
148
+ self.batch_sampler = batch_sampler
149
+ self.num_processes = num_processes
150
+ self.process_index = process_index
151
+ self.split_batches = split_batches
152
+ self.even_batches = even_batches
153
+ self.batch_size = getattr(batch_sampler, "batch_size", None)
154
+ self.drop_last = getattr(batch_sampler, "drop_last", False)
155
+ if self.batch_size is None and self.even_batches:
156
+ raise ValueError(
157
+ "You need to use `even_batches=False` when the batch sampler has no batch size. If you "
158
+ "are not calling this method directly, set `accelerator.even_batches=False` instead."
159
+ )
160
+
161
+ @property
162
+ def total_length(self):
163
+ return len(self.batch_sampler)
164
+
165
+ def __len__(self):
166
+ if self.split_batches:
167
+ # Split batches does not change the length of the batch sampler
168
+ return len(self.batch_sampler)
169
+ if len(self.batch_sampler) % self.num_processes == 0:
170
+ # If the length is a round multiple of the number of processes, it's easy.
171
+ return len(self.batch_sampler) // self.num_processes
172
+ length = len(self.batch_sampler) // self.num_processes
173
+ if self.drop_last:
174
+ # Same if we drop the remainder.
175
+ return length
176
+ elif self.even_batches:
177
+ # When we even batches we always get +1
178
+ return length + 1
179
+ else:
180
+ # Otherwise it depends on the process index.
181
+ return length + 1 if self.process_index < len(self.batch_sampler) % self.num_processes else length
182
+
183
+ def __iter__(self):
184
+ return self._iter_with_split() if self.split_batches else self._iter_with_no_split()
185
+
186
+ def _iter_with_split(self):
187
+ initial_data = []
188
+ batch_length = self.batch_sampler.batch_size // self.num_processes
189
+ for idx, batch in enumerate(self.batch_sampler):
190
+ if idx == 0:
191
+ initial_data = batch
192
+ if len(batch) == self.batch_size:
193
+ # If the batch is full, we yield the part of it this process is responsible of.
194
+ yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]
195
+
196
+ # If drop_last is True of the last batch was full, iteration is over, otherwise...
197
+ if not self.drop_last and len(initial_data) > 0 and len(batch) < self.batch_size:
198
+ if not self.even_batches:
199
+ if len(batch) > batch_length * self.process_index:
200
+ yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]
201
+ else:
202
+ # For degenerate cases where the dataset has less than num_process * batch_size samples
203
+ while len(initial_data) < self.batch_size:
204
+ initial_data += initial_data
205
+ batch = batch + initial_data
206
+ yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]
207
+
208
+ def _iter_with_no_split(self):
209
+ initial_data = []
210
+ batch_to_yield = []
211
+ for idx, batch in enumerate(self.batch_sampler):
212
+ # We gather the initial indices in case we need to circle back at the end.
213
+ if not self.drop_last and idx < self.num_processes:
214
+ initial_data += batch
215
+ # We identify the batch to yield but wait until we ar sure every process gets a full batch before actually
216
+ # yielding it.
217
+ if idx % self.num_processes == self.process_index:
218
+ batch_to_yield = batch
219
+ if idx % self.num_processes == self.num_processes - 1 and (
220
+ self.batch_size is None or len(batch) == self.batch_size
221
+ ):
222
+ yield batch_to_yield
223
+ batch_to_yield = []
224
+
225
+ # If drop_last is True, iteration is over, otherwise...
226
+ if not self.drop_last and len(initial_data) > 0:
227
+ if not self.even_batches:
228
+ if len(batch_to_yield) > 0:
229
+ yield batch_to_yield
230
+ else:
231
+ # ... we yield the complete batch we had saved before if it has the proper length
232
+ if len(batch_to_yield) == self.batch_size:
233
+ yield batch_to_yield
234
+
235
+ # For degenerate cases where the dataset has less than num_process * batch_size samples
236
+ while len(initial_data) < self.num_processes * self.batch_size:
237
+ initial_data += initial_data
238
+
239
+ # If the last batch seen was of the proper size, it has been yielded by its process so we move to the next
240
+ if len(batch) == self.batch_size:
241
+ batch = []
242
+ idx += 1
243
+
244
+ # Make sure we yield a multiple of self.num_processes batches
245
+ cycle_index = 0
246
+ while idx % self.num_processes != 0 or len(batch) > 0:
247
+ end_index = cycle_index + self.batch_size - len(batch)
248
+ batch += initial_data[cycle_index:end_index]
249
+ if idx % self.num_processes == self.process_index:
250
+ yield batch
251
+ cycle_index = end_index
252
+ batch = []
253
+ idx += 1
254
+
255
+
256
+ class IterableDatasetShard(IterableDataset):
257
+ """
258
+ Wraps a PyTorch `IterableDataset` to generate samples for one of the processes only. Instances of this class will
259
+ always yield a number of samples that is a round multiple of the actual batch size (depending of the value of
260
+ `split_batches`, this is either `batch_size` or `batch_size x num_processes`). Depending on the value of the
261
+ `drop_last` attribute of the batch sampler passed, it will either stop the iteration at the first batch that would
262
+ be too small or loop with indices from the beginning.
263
+
264
+ Args:
265
+ dataset (`torch.utils.data.dataset.IterableDataset`):
266
+ The batch sampler to split in several shards.
267
+ batch_size (`int`, *optional*, defaults to 1):
268
+ The size of the batches per shard (if `split_batches=False`) or the size of the batches (if
269
+ `split_batches=True`).
270
+ drop_last (`bool`, *optional*, defaults to `False`):
271
+ Whether or not to drop the last incomplete batch or complete the last batches by using the samples from the
272
+ beginning.
273
+ num_processes (`int`, *optional*, defaults to 1):
274
+ The number of processes running concurrently.
275
+ process_index (`int`, *optional*, defaults to 0):
276
+ The index of the current process.
277
+ split_batches (`bool`, *optional*, defaults to `False`):
278
+ Whether the shards should be created by splitting a batch to give a piece of it on each process, or by
279
+ yielding different full batches on each process.
280
+
281
+ On two processes with an iterable dataset yielding of `[0, 1, 2, 3, 4, 5, 6, 7]`, this will result in:
282
+
283
+ - the shard on process 0 to yield `[0, 1, 2, 3]` and the shard on process 1 to yield `[4, 5, 6, 7]` if this
284
+ argument is set to `False`.
285
+ - the shard on process 0 to yield `[0, 1, 4, 5]` and the sampler on process 1 to yield `[2, 3, 6, 7]` if
286
+ this argument is set to `True`.
287
+ """
288
+
289
+ def __init__(
290
+ self,
291
+ dataset: IterableDataset,
292
+ batch_size: int = 1,
293
+ drop_last: bool = False,
294
+ num_processes: int = 1,
295
+ process_index: int = 0,
296
+ split_batches: bool = False,
297
+ ):
298
+ if split_batches and batch_size > 1 and batch_size % num_processes != 0:
299
+ raise ValueError(
300
+ f"To use `IterableDatasetShard` in `split_batches` mode, the batch size ({batch_size}) "
301
+ f"needs to be a round multiple of the number of processes ({num_processes})."
302
+ )
303
+ self.dataset = dataset
304
+ self.batch_size = batch_size
305
+ self.drop_last = drop_last
306
+ self.num_processes = num_processes
307
+ self.process_index = process_index
308
+ self.split_batches = split_batches
309
+
310
+ def set_epoch(self, epoch):
311
+ self.epoch = epoch
312
+ if hasattr(self.dataset, "set_epoch"):
313
+ self.dataset.set_epoch(epoch)
314
+
315
+ def __len__(self):
316
+ # We will just raise the downstream error if the underlying dataset is not sized
317
+ if self.drop_last:
318
+ return (len(self.dataset) // (self.batch_size * self.num_processes)) * self.batch_size
319
+ else:
320
+ return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size
321
+
322
+ def __iter__(self):
323
+ if (
324
+ not hasattr(self.dataset, "set_epoch")
325
+ and hasattr(self.dataset, "generator")
326
+ and isinstance(self.dataset.generator, torch.Generator)
327
+ ):
328
+ self.dataset.generator.manual_seed(self.epoch)
329
+ real_batch_size = self.batch_size if self.split_batches else (self.batch_size * self.num_processes)
330
+ process_batch_size = (self.batch_size // self.num_processes) if self.split_batches else self.batch_size
331
+ process_slice = range(self.process_index * process_batch_size, (self.process_index + 1) * process_batch_size)
332
+
333
+ first_batch = None
334
+ current_batch = []
335
+ for element in self.dataset:
336
+ current_batch.append(element)
337
+ # Wait to have a full batch before yielding elements.
338
+ if len(current_batch) == real_batch_size:
339
+ for i in process_slice:
340
+ yield current_batch[i]
341
+ if first_batch is None:
342
+ first_batch = current_batch.copy()
343
+ current_batch = []
344
+
345
+ # Finished if drop_last is True, otherwise complete the last batch with elements from the beginning.
346
+ if not self.drop_last and len(current_batch) > 0:
347
+ if first_batch is None:
348
+ first_batch = current_batch.copy()
349
+ while len(current_batch) < real_batch_size:
350
+ current_batch += first_batch
351
+ for i in process_slice:
352
+ yield current_batch[i]
353
+
354
+
355
+ class DataLoaderStateMixin:
356
+ """
357
+ Mixin class that adds a state to a `DataLoader` to keep track of the status inside the dataloader such as at the
358
+ end of the iteration, the number of items in the dataset in the last batch relative to the batch size, and other
359
+ useful information that might be needed.
360
+
361
+ **Available attributes:**
362
+
363
+ - **end_of_dataloader** (`bool`) -- Whether at the last iteration or batch
364
+ - **remainder** (`int`) -- The number of items that are remaining in the last batch, relative to the total
365
+ batch size
366
+
367
+ """
368
+
369
+ def __init_subclass__(cls, **kwargs):
370
+ cls.end_of_dataloader = False
371
+ cls.remainder = -1
372
+
373
+ def reset(self):
374
+ self.end_of_dataloader = False
375
+ self.remainder = -1
376
+
377
+ def begin(self):
378
+ "Prepares the gradient state for the current dataloader"
379
+ self.reset()
380
+ with suppress(Exception):
381
+ if not self._drop_last:
382
+ length = getattr(self.dataset, "total_dataset_length", len(self.dataset))
383
+ self.remainder = length % self.total_batch_size
384
+ self.gradient_state._add_dataloader(self)
385
+
386
+ def end(self):
387
+ "Cleans up the gradient state after exiting the dataloader"
388
+ self.gradient_state._remove_dataloader(self)
389
+
390
+
391
+ class DataLoaderShard(DataLoader, DataLoaderStateMixin):
392
+ """
393
+ Subclass of a PyTorch `DataLoader` that will deal with device placement and current distributed setup.
394
+
395
+ Args:
396
+ dataset (`torch.utils.data.dataset.Dataset`):
397
+ The dataset to use to build this datalaoder.
398
+ device (`torch.device`, *optional*):
399
+ If passed, the device to put all batches on.
400
+ rng_types (list of `str` or [`~utils.RNGType`]):
401
+ The list of random number generators to synchronize at the beginning of each iteration. Should be one or
402
+ several of:
403
+
404
+ - `"torch"`: the base torch random number generator
405
+ - `"cuda"`: the CUDA random number generator (GPU only)
406
+ - `"xla"`: the XLA random number generator (TPU only)
407
+ - `"generator"`: an optional `torch.Generator`
408
+ synchronized_generator (`torch.Generator`, *optional*):
409
+ A random number generator to keep synchronized across processes.
410
+ skip_batches (`int`, *optional*, defaults to 0):
411
+ The number of batches to skip at the beginning.
412
+ **kwargs (additional keyword arguments, *optional*):
413
+ All other keyword arguments to pass to the regular `DataLoader` initialization.
414
+
415
+ **Available attributes:**
416
+
417
+ - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.
418
+ Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total
419
+ number of processes
420
+
421
+ - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
422
+ """
423
+
424
+ def __init__(
425
+ self,
426
+ dataset,
427
+ device=None,
428
+ rng_types=None,
429
+ synchronized_generator=None,
430
+ skip_batches=0,
431
+ _drop_last: bool = False,
432
+ **kwargs,
433
+ ):
434
+ super().__init__(dataset, **kwargs)
435
+ self.device = device
436
+ self.rng_types = rng_types
437
+ self.synchronized_generator = synchronized_generator
438
+ self.skip_batches = skip_batches
439
+ self.gradient_state = GradientState()
440
+ self._drop_last = _drop_last
441
+ self.iteration = 0
442
+
443
+ def __iter__(self):
444
+ if self.rng_types is not None:
445
+ synchronize_rng_states(self.rng_types, self.synchronized_generator)
446
+ self.begin()
447
+
448
+ self.set_epoch(self.iteration)
449
+ dataloader_iter = super().__iter__()
450
+ # We iterate one batch ahead to check when we are at the end
451
+ try:
452
+ current_batch = next(dataloader_iter)
453
+ except StopIteration:
454
+ yield
455
+
456
+ batch_index = 0
457
+ while True:
458
+ try:
459
+ # But we still move it to the device so it is done before `StopIteration` is reached
460
+ if self.device is not None:
461
+ current_batch = send_to_device(current_batch, self.device)
462
+ next_batch = next(dataloader_iter)
463
+ if batch_index >= self.skip_batches:
464
+ yield current_batch
465
+ batch_index += 1
466
+ current_batch = next_batch
467
+ except StopIteration:
468
+ self.end_of_dataloader = True
469
+ if batch_index >= self.skip_batches:
470
+ yield current_batch
471
+ break
472
+
473
+ self.iteration += 1
474
+ self.end()
475
+
476
+ def set_epoch(self, epoch: int):
477
+ # In case it is manually passed in, the user can set it to what they like
478
+ if self.iteration != epoch:
479
+ self.iteration = epoch
480
+ if hasattr(self.batch_sampler, "sampler") and hasattr(self.batch_sampler.sampler, "set_epoch"):
481
+ self.batch_sampler.sampler.set_epoch(epoch)
482
+ # We support if a custom `Dataset` implementation has `set_epoch`
483
+ # or in general HF datasets `Datasets`
484
+ elif hasattr(self.dataset, "set_epoch"):
485
+ self.dataset.set_epoch(epoch)
486
+
487
+ @property
488
+ def total_batch_size(self):
489
+ batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler
490
+ return (
491
+ batch_sampler.batch_size
492
+ if getattr(batch_sampler, "split_batches", False)
493
+ else (batch_sampler.batch_size * getattr(batch_sampler, "num_processes", 1))
494
+ )
495
+
496
+ @property
497
+ def total_dataset_length(self):
498
+ if hasattr(self.dataset, "total_length"):
499
+ return self.dataset.total_length
500
+ else:
501
+ return len(self.dataset)
502
+
503
+
504
+ if is_torch_xla_available():
505
+ import torch_xla.distributed.parallel_loader as xpl
506
+
507
+ class MpDeviceLoaderWrapper(xpl.MpDeviceLoader):
508
+ """
509
+ Wrapper for the xpl.MpDeviceLoader class that knows the total batch size.
510
+
511
+ XLA preloading threads will all call DataLoaderShard's __iter__(). Remove rng_types from DataLoaderShard to
512
+ prevent it from using the XLA device in the preloading threads, and synchronize the RNG once from the main
513
+ thread only.
514
+
515
+ **Available attributes:**
516
+
517
+ - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.
518
+ Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total
519
+ number of processes
520
+
521
+ - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
522
+ """
523
+
524
+ def __init__(self, dataloader: DataLoaderShard, device: torch.device):
525
+ super().__init__(dataloader, device)
526
+ self._rng_types = self._loader.rng_types
527
+ self._loader.rng_types = None
528
+
529
+ def __iter__(self):
530
+ if self._rng_types is not None:
531
+ synchronize_rng_states(self._rng_types, self._loader.synchronized_generator)
532
+
533
+ return super().__iter__()
534
+
535
+ @property
536
+ def total_batch_size(self):
537
+ return self._loader.total_batch_size
538
+
539
+ @property
540
+ def total_dataset_length(self):
541
+ return self._loader.total_dataset_length
542
+
543
+ @property
544
+ def batch_sampler(self):
545
+ return self._loader.batch_sampler
546
+
547
+
548
+ class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
549
+ """
550
+ Subclass of a PyTorch `DataLoader` that will iterate and preprocess on process 0 only, then dispatch on each
551
+ process their part of the batch.
552
+
553
+ Args:
554
+ split_batches (`bool`, *optional*, defaults to `False`):
555
+ Whether the resulting `DataLoader` should split the batches of the original data loader across devices or
556
+ yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing of
557
+ `num_processes` batches at each iteration). Another way to see this is that the observed batch size will be
558
+ the same as the initial `dataloader` if this option is set to `True`, the batch size of the initial
559
+ `dataloader` multiplied by `num_processes` otherwise. Setting this option to `True` requires that the batch
560
+ size of the `dataloader` is a round multiple of `batch_size`.
561
+ skip_batches (`int`, *optional*, defaults to 0):
562
+ The number of batches to skip at the beginning of an iteration.
563
+
564
+ **Available attributes:**
565
+
566
+ - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.
567
+ Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total
568
+ number of processes
569
+
570
+ - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
571
+ """
572
+
573
+ def __init__(
574
+ self, dataset, split_batches: bool = False, skip_batches=0, _drop_last: bool = False, slice_fn=None, **kwargs
575
+ ):
576
+ shuffle = False
577
+ if is_torch_version(">=", "1.11.0"):
578
+ from torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe
579
+
580
+ # We need to save the shuffling state of the DataPipe
581
+ if isinstance(dataset, ShufflerIterDataPipe):
582
+ shuffle = dataset._shuffle_enabled
583
+ super().__init__(dataset, **kwargs)
584
+ self.split_batches = split_batches
585
+ if shuffle:
586
+ torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle)
587
+
588
+ self.gradient_state = GradientState()
589
+ self.state = AcceleratorState()
590
+ self._drop_last = _drop_last
591
+ self.skip_batches = skip_batches
592
+
593
+ self.slice_fn = slice_tensors if slice_fn is None else slice_fn
594
+ self.iteration = 0
595
+
596
+ def _fetch_batches(self, iterator):
597
+ batches, batch = None, None
598
+ # On process 0, we gather the batch to dispatch.
599
+ if self.state.process_index == 0:
600
+ try:
601
+ if self.split_batches:
602
+ # One batch of the main iterator is dispatched and split.
603
+ batch = next(iterator)
604
+ else:
605
+ # num_processes batches of the main iterator are concatenated then dispatched and split.
606
+ # We add the batches one by one so we have the remainder available when drop_last=False.
607
+ batches = []
608
+ for _ in range(self.state.num_processes):
609
+ batches.append(next(iterator))
610
+ try:
611
+ batch = concatenate(batches, dim=0)
612
+ except RuntimeError as e:
613
+ raise RuntimeError(
614
+ "You can't use batches of different size with `dispatch_batches=True` or when using an `IterableDataset`."
615
+ "either pass `dispatch_batches=False` and have each process fetch its own batch "
616
+ " or pass `split_batches=True`. By doing so, the main process will fetch a full batch and "
617
+ "slice it into `num_processes` batches for each process."
618
+ ) from e
619
+ # In both cases, we need to get the structure of the batch that we will broadcast on other
620
+ # processes to initialize the tensors with the right shape.
621
+ # data_structure, stop_iteration
622
+ batch_info = [get_data_structure(batch), False]
623
+ except StopIteration:
624
+ batch_info = [None, True]
625
+ else:
626
+ batch_info = [None, self._stop_iteration]
627
+ # This is inplace, so after this instruction, every process has the same `batch_info` as process 0.
628
+ broadcast_object_list(batch_info)
629
+ self._stop_iteration = batch_info[1]
630
+ if self._stop_iteration:
631
+ # If drop_last is False and split_batches is False, we may have a remainder to take care of.
632
+ if not self.split_batches and not self._drop_last:
633
+ if self.state.process_index == 0 and len(batches) > 0:
634
+ batch = concatenate(batches, dim=0)
635
+ batch_info = [get_data_structure(batch), False]
636
+ else:
637
+ batch_info = [None, True]
638
+ broadcast_object_list(batch_info)
639
+ return batch, batch_info
640
+
641
+ def __iter__(self):
642
+ self.begin()
643
+ self.set_epoch(self.iteration)
644
+ main_iterator = None
645
+ if is_torch_version(">=", "2.0.1"):
646
+ # NOTE PyTorch DataLoader adds forward compatibilities for DataPipes, which broadcasts
647
+ # shared seed to all dist processes. Thus, we need to create iterator for all dist processes.
648
+ # But, we only iterate through the DataLoader on process 0.
649
+ main_iterator = super().__iter__()
650
+ elif self.state.process_index == 0:
651
+ main_iterator = super().__iter__()
652
+ stop_iteration = False
653
+ self._stop_iteration = False
654
+ first_batch = None
655
+ next_batch, next_batch_info = self._fetch_batches(main_iterator)
656
+ batch_index = 0
657
+ while not stop_iteration:
658
+ batch, batch_info = next_batch, next_batch_info
659
+
660
+ if self.state.process_index != 0:
661
+ # Initialize tensors on other processes than process 0.
662
+ batch = initialize_tensors(batch_info[0])
663
+ batch = send_to_device(batch, self.state.device)
664
+ # Broadcast the batch before splitting it.
665
+ batch = broadcast(batch, from_process=0)
666
+
667
+ if not self._drop_last and first_batch is None:
668
+ # We keep at least num processes elements of the first batch to be able to complete the last batch
669
+ first_batch = self.slice_fn(
670
+ batch,
671
+ slice(0, self.state.num_processes),
672
+ process_index=self.state.process_index,
673
+ num_processes=self.state.num_processes,
674
+ )
675
+
676
+ if batch is None:
677
+ raise ValueError(
678
+ f"Batch does not contain any data (`{batch}`). At the end of all iterable data available before expected stop iteration."
679
+ )
680
+
681
+ observed_batch_size = find_batch_size(batch)
682
+ batch_size = observed_batch_size // self.state.num_processes
683
+
684
+ stop_iteration = self._stop_iteration
685
+ if not stop_iteration:
686
+ # We may still be at the end of the dataloader without knowing it yet: if there is nothing left in
687
+ # the dataloader since the number of batches is a round multiple of the number of processes.
688
+ next_batch, next_batch_info = self._fetch_batches(main_iterator)
689
+ # next_batch_info[0] is None when there are no more batches, otherwise we still need to process them.
690
+ if self._stop_iteration and next_batch_info[0] is None:
691
+ stop_iteration = True
692
+
693
+ if not self._drop_last and stop_iteration and observed_batch_size % self.state.num_processes != 0:
694
+ # If the last batch is not complete, let's add the first batch to it.
695
+ batch = concatenate([batch, first_batch], dim=0)
696
+ # The per-process batch size computed above is now off by one, so fix it.
697
+ batch_size += 1
698
+
699
+ data_slice = slice(self.state.process_index * batch_size, (self.state.process_index + 1) * batch_size)
700
+ batch = self.slice_fn(
701
+ batch,
702
+ data_slice,
703
+ process_index=self.state.process_index,
704
+ num_processes=self.state.num_processes,
705
+ )
706
+
707
+ if stop_iteration:
708
+ self.end_of_dataloader = True
709
+ self.remainder = observed_batch_size
710
+ if batch_index >= self.skip_batches:
711
+ yield batch
712
+ batch_index += 1
713
+ self.iteration += 1
714
+ self.end()
715
+
716
+ def set_epoch(self, epoch: int):
717
+ # In case it is manually passed in, the user can set it to what they like
718
+ if self.iteration != epoch:
719
+ self.iteration = epoch
720
+ if hasattr(self.batch_sampler.sampler, "set_epoch"):
721
+ self.batch_sampler.sampler.set_epoch(epoch)
722
+ elif hasattr(self.dataset, "set_epoch"):
723
+ self.dataset.set_epoch(epoch)
724
+
725
+ def __len__(self):
726
+ whole_length = super().__len__()
727
+ if self.split_batches:
728
+ return whole_length
729
+ elif self._drop_last:
730
+ return whole_length // self.state.num_processes
731
+ else:
732
+ return math.ceil(whole_length / self.state.num_processes)
733
+
734
+ @property
735
+ def total_batch_size(self):
736
+ return (
737
+ self.dataset.batch_size if self.split_batches else (self.dataset.batch_size * self.dataset.num_processes)
738
+ )
739
+
740
+ @property
741
+ def total_dataset_length(self):
742
+ return len(self.dataset)
743
+
744
+
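The remainder handling in `DataLoaderDispatcher.__iter__` above (padding an incomplete final batch with samples saved from the first batch) is easiest to see in isolation. A simplified sketch with plain tensors, independent of the class machinery:

```python
import torch

# Simplified sketch of the even-batches trick: when the final batch is
# incomplete, samples kept from the first batch pad it so that every
# process still receives the same number of items.
num_processes = 4
first_batch = torch.arange(8)        # first global batch seen (8 samples)
last_batch = torch.arange(100, 106)  # incomplete final batch (6 samples)

kept = first_batch[:num_processes]   # what the dispatcher keeps around
if last_batch.shape[0] % num_processes != 0:
    target = (last_batch.shape[0] // num_processes + 1) * num_processes
    last_batch = torch.cat([last_batch, kept], dim=0)[:target]

per_process = last_batch.shape[0] // num_processes
for rank in range(num_processes):
    print(rank, last_batch[rank * per_process : (rank + 1) * per_process])
```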
745
+ def prepare_data_loader(
746
+ dataloader: DataLoader,
747
+ device: Optional[torch.device] = None,
748
+ num_processes: Optional[int] = None,
749
+ process_index: Optional[int] = None,
750
+ split_batches: bool = False,
751
+ put_on_device: bool = False,
752
+ rng_types: Optional[List[Union[str, RNGType]]] = None,
753
+ dispatch_batches: Optional[bool] = None,
754
+ even_batches: bool = True,
755
+ slice_fn_for_dispatch: Optional[Callable] = None,
756
+ use_seedable_sampler: bool = False,
757
+ ) -> DataLoader:
758
+ """
759
+ Wraps a PyTorch `DataLoader` to generate batches for one of the processes only.
760
+
761
+ Depending on the value of the `drop_last` attribute of the `dataloader` passed, it will either stop the iteration
762
+ at the first batch that would be too small / not present on all processes or loop with indices from the beginning.
763
+
764
+ Args:
765
+ dataloader (`torch.utils.data.dataloader.DataLoader`):
766
+ The data loader to split across several devices.
767
+ device (`torch.device`):
768
+ The target device for the returned `DataLoader`.
769
+ num_processes (`int`, *optional*):
770
+ The number of processes running concurrently. Will default to the value given by
771
+ [`~state.AcceleratorState`].
772
+ process_index (`int`, *optional*):
773
+ The index of the current process. Will default to the value given by [`~state.AcceleratorState`].
774
+ split_batches (`bool`, *optional*, defaults to `False`):
775
+ Whether the resulting `DataLoader` should split the batches of the original data loader across devices or
776
+ yield full batches (in which case it will yield batches starting at the `process_index`-th one, advancing by
777
+ `num_processes` batches at each iteration).
778
+
779
+ Another way to see this is that the observed batch size will be the same as the initial `dataloader` if
780
+ this option is set to `True`, the batch size of the initial `dataloader` multiplied by `num_processes`
781
+ otherwise.
782
+
783
+ Setting this option to `True` requires that the batch size of the `dataloader` is a round multiple of
784
+ `num_processes`.
785
+ put_on_device (`bool`, *optional*, defaults to `False`):
786
+ Whether or not to put the batches on `device` (only works if the batches are nested list, tuples or
787
+ dictionaries of tensors).
788
+ rng_types (list of `str` or [`~utils.RNGType`]):
789
+ The list of random number generators to synchronize at the beginning of each iteration. Should be one or
790
+ several of:
791
+
792
+ - `"torch"`: the base torch random number generator
793
+ - `"cuda"`: the CUDA random number generator (GPU only)
794
+ - `"xla"`: the XLA random number generator (TPU only)
795
+ - `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your
796
+ dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.
797
+
798
+ dispatch_batches (`bool`, *optional*):
799
+ If set to `True`, the dataloader prepared is only iterated through on the main process and then the batches
800
+ are split and broadcast to each process. Will default to `True` when the underlying dataset is an
801
+ `IterableDataset`, `False` otherwise.
802
+ even_batches (`bool`, *optional*, defaults to `True`):
803
+ If set to `True`, in cases where the total batch size across all processes does not exactly divide the
804
+ dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among
805
+ all workers.
806
+ slice_fn_for_dispatch (`Callable`, *optional*):
807
+ If passed, this function will be used to slice tensors across `num_processes`. Will default to
808
+ [`~utils.slice_tensors`]. This argument is used only when `dispatch_batches` is set to `True` and will be
809
+ ignored otherwise.
810
+ use_seedable_sampler (`bool`, *optional*, defaults to `False`):
811
+ Whether to use the [`~data_loader.SeedableRandomSampler`] instead of a `RandomSampler` for better
812
+ reproducibility. Comes at a cost of potentially different performance due to different shuffling
813
+ algorithms, but ensures results will be the *exact* same. Should be paired with `set_seed()` at every
814
+ `self.set_epoch` call.
815
+
816
+ Returns:
817
+ `torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches for the current process.
818
+
819
+ <Tip warning={true}>
820
+
821
+ `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches`
822
+ equal to `False`.
823
+
824
+ </Tip>
825
+ """
826
+ if dispatch_batches is None:
827
+ if not put_on_device:
828
+ dispatch_batches = False
829
+ else:
830
+ dispatch_batches = isinstance(dataloader.dataset, IterableDataset)
831
+
832
+ if dispatch_batches and not put_on_device:
833
+ raise ValueError("Using `dispatch_batches=True` requires `put_on_device=True`.")
834
+ # Grab defaults from AcceleratorState
835
+ state = AcceleratorState()
836
+ if num_processes is None:
837
+ num_processes = state.num_processes
838
+ if process_index is None:
839
+ process_index = state.process_index
840
+
841
+ # Sanity check
842
+ if split_batches:
843
+ if dataloader.batch_size is not None:
844
+ batch_size_for_check = dataloader.batch_size
845
+ else:
846
+ # For custom batch_sampler
847
+ if hasattr(dataloader.batch_sampler, "batch_size"):
848
+ batch_size_for_check = dataloader.batch_sampler.batch_size
849
+ else:
850
+ raise ValueError(
851
+ "In order to use `split_batches==True` you must have a `batch_size` attribute either in the passed "
852
+ "`dataloader` or `dataloader.batch_sampler` objects, and it has to return a natural number. "
853
+ "Your `dataloader.batch_size` is None and `dataloader.batch_sampler` "
854
+ f"(`{type(dataloader.batch_sampler)}`) does not have the `batch_size` attribute set."
855
+ )
856
+
857
+ if batch_size_for_check > 1 and batch_size_for_check % num_processes != 0:
858
+ raise ValueError(
859
+ f"To use a `DataLoader` in `split_batches` mode, the batch size ({batch_size_for_check}) "
860
+ f"needs to be a round multiple of the number of processes ({num_processes})."
861
+ )
862
+
863
+ new_dataset = dataloader.dataset
864
+ # Iterable dataset doesn't like batch_sampler, but data_loader creates a default one for it
865
+ new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None
866
+ sampler_is_batch_sampler = False
867
+ synchronized_generator = None
868
+ sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)
869
+ if sampler_is_batch_sampler:
870
+ sampler = getattr(dataloader.sampler, "sampler", None)
871
+ else:
872
+ sampler = getattr(dataloader.batch_sampler, "sampler", None)
873
+ if isinstance(sampler, RandomSampler) and use_seedable_sampler:
874
+ # When iterating through the dataloader during distributed processes
875
+ # we want to ensure that on each process we are iterating through the same
876
+ # samples in the same order if a seed is set. This requires a tweak
877
+ # to the `torch.utils.data.RandomSampler` class (if used).
878
+ sampler = SeedableRandomSampler(
879
+ data_source=sampler.data_source,
880
+ replacement=sampler.replacement,
881
+ num_samples=sampler._num_samples,
882
+ generator=getattr(sampler, "generator", torch.Generator()),
883
+ )
884
+
885
+ if isinstance(dataloader.sampler, RandomSampler) and state.distributed_type == DistributedType.XLA:
886
+ # isinstance(dataloader.sampler, RandomSampler) indicates the original dataloader has `shuffle` enabled.
887
+ generator = torch.Generator().manual_seed(42)
888
+ dataloader.generator = generator
889
+ dataloader.sampler.generator = generator
890
+ # No change if no multiprocess
891
+ if (num_processes != 1 or state.distributed_type == DistributedType.MEGATRON_LM) and not dispatch_batches:
892
+ if isinstance(new_dataset, IterableDataset):
893
+ if getattr(dataloader.dataset, "generator", None) is not None:
894
+ synchronized_generator = dataloader.dataset.generator
895
+ new_dataset = IterableDatasetShard(
896
+ new_dataset,
897
+ batch_size=dataloader.batch_size,
898
+ drop_last=dataloader.drop_last,
899
+ num_processes=num_processes,
900
+ process_index=process_index,
901
+ split_batches=split_batches,
902
+ )
903
+ else:
904
+ batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler
905
+ new_batch_sampler = BatchSamplerShard(
906
+ batch_sampler,
907
+ num_processes=num_processes,
908
+ process_index=process_index,
909
+ split_batches=split_batches,
910
+ even_batches=even_batches,
911
+ )
912
+
913
+ # We ignore all of those since they are all dealt with by our new_batch_sampler
914
+ ignore_kwargs = [
915
+ "batch_size",
916
+ "shuffle",
917
+ "sampler",
918
+ "batch_sampler",
919
+ "drop_last",
920
+ ]
921
+
922
+ if rng_types is not None and synchronized_generator is None and "generator" in rng_types:
923
+ rng_types.remove("generator")
924
+
925
+ kwargs = {
926
+ k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k])
927
+ for k in _PYTORCH_DATALOADER_KWARGS
928
+ if k not in ignore_kwargs
929
+ }
930
+
931
+ # Need to provide batch_size as batch_sampler is None for Iterable dataset
932
+ if new_batch_sampler is None:
933
+ kwargs["drop_last"] = dataloader.drop_last
934
+ kwargs["batch_size"] = (
935
+ dataloader.batch_size // num_processes if split_batches and not dispatch_batches else dataloader.batch_size
936
+ )
937
+ if dispatch_batches:
938
+ kwargs.pop("generator")
939
+ dataloader = DataLoaderDispatcher(
940
+ new_dataset,
941
+ split_batches=split_batches,
942
+ batch_sampler=new_batch_sampler,
943
+ _drop_last=dataloader.drop_last,
944
+ slice_fn=slice_fn_for_dispatch,
945
+ **kwargs,
946
+ )
947
+ elif sampler_is_batch_sampler:
948
+ dataloader = DataLoaderShard(
949
+ new_dataset,
950
+ device=device if put_on_device and state.distributed_type != DistributedType.XLA else None,
951
+ sampler=new_batch_sampler,
952
+ batch_size=dataloader.batch_size,
953
+ rng_types=rng_types,
954
+ _drop_last=dataloader.drop_last,
955
+ synchronized_generator=synchronized_generator,
956
+ **kwargs,
957
+ )
958
+ else:
959
+ dataloader = DataLoaderShard(
960
+ new_dataset,
961
+ device=device if put_on_device and state.distributed_type != DistributedType.XLA else None,
962
+ batch_sampler=new_batch_sampler,
963
+ rng_types=rng_types,
964
+ synchronized_generator=synchronized_generator,
965
+ _drop_last=dataloader.drop_last,
966
+ **kwargs,
967
+ )
968
+
969
+ if isinstance(sampler, SeedableRandomSampler) and use_seedable_sampler:
970
+ if sampler_is_batch_sampler:
971
+ dataloader.sampler.sampler = sampler
972
+ else:
973
+ dataloader.batch_sampler.sampler = sampler
974
+ if hasattr(dataloader.batch_sampler, "batch_sampler"):
975
+ dataloader.batch_sampler.batch_sampler.sampler = sampler
976
+ if state.distributed_type == DistributedType.XLA:
977
+ return MpDeviceLoaderWrapper(dataloader, device)
978
+ return dataloader
979
+
980
+
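A minimal usage sketch for `prepare_data_loader`; it assumes a process group has already been set up (e.g. the script was started with `accelerate launch`), and the `cpu` device is purely illustrative:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate.data_loader import prepare_data_loader

# Shard an 8-sample dataset across the current process group; each process
# then sees only its own slice of every batch.
dataset = TensorDataset(torch.arange(8.0))
dataloader = DataLoader(dataset, batch_size=4)

prepared = prepare_data_loader(
    dataloader,
    device=torch.device("cpu"),  # illustrative; use your accelerator device
    put_on_device=True,
)
for (batch,) in prepared:
    print(batch)
```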
981
+ class SkipBatchSampler(BatchSampler):
982
+ """
983
+ A `torch.utils.data.BatchSampler` that skips the first `n` batches of another `torch.utils.data.BatchSampler`.
984
+ """
985
+
986
+ def __init__(self, batch_sampler, skip_batches=0):
987
+ self.batch_sampler = batch_sampler
988
+ self.skip_batches = skip_batches
989
+
990
+ def __iter__(self):
991
+ for index, samples in enumerate(self.batch_sampler):
992
+ if index >= self.skip_batches:
993
+ yield samples
994
+
995
+ @property
996
+ def total_length(self):
997
+ return len(self.batch_sampler)
998
+
999
+ def __len__(self):
1000
+ return len(self.batch_sampler) - self.skip_batches
1001
+
1002
+
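A quick sketch of `SkipBatchSampler` in action, assuming it is imported from `accelerate.data_loader`:

```python
from torch.utils.data import BatchSampler, SequentialSampler

from accelerate.data_loader import SkipBatchSampler

# 10 indices in batches of 2 -> 5 batches; skipping 2 leaves the last 3.
base = BatchSampler(SequentialSampler(range(10)), batch_size=2, drop_last=False)
skipping = SkipBatchSampler(base, skip_batches=2)
print(list(skipping))  # [[4, 5], [6, 7], [8, 9]]
print(len(skipping))   # 3
```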
1003
+ class SkipDataLoader(DataLoader):
1004
+ """
1005
+ Subclass of a PyTorch `DataLoader` that will skip the first batches.
1006
+
1007
+ Args:
1008
+ dataset (`torch.utils.data.dataset.Dataset`):
1009
+ The dataset to use to build this dataloader.
1010
+ skip_batches (`int`, *optional*, defaults to 0):
1011
+ The number of batches to skip at the beginning.
1012
+ kwargs:
1013
+ All other keyword arguments to pass to the regular `DataLoader` initialization.
1014
+ """
1015
+
1016
+ def __init__(self, dataset, skip_batches=0, **kwargs):
1017
+ super().__init__(dataset, **kwargs)
1018
+ self.skip_batches = skip_batches
1019
+
1020
+ def __iter__(self):
1021
+ for index, batch in enumerate(super().__iter__()):
1022
+ if index >= self.skip_batches:
1023
+ yield batch
1024
+
1025
+
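And the equivalent sketch for `SkipDataLoader`, which skips whole batches at the dataloader level rather than the sampler level:

```python
import torch
from torch.utils.data import TensorDataset

from accelerate.data_loader import SkipDataLoader

# The first two batches are consumed silently; iteration starts at the third.
dataset = TensorDataset(torch.arange(10.0))
loader = SkipDataLoader(dataset, skip_batches=2, batch_size=2)
for (batch,) in loader:
    print(batch)  # tensor([4., 5.]), tensor([6., 7.]), tensor([8., 9.])
```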
1026
+ def skip_first_batches(dataloader, num_batches=0):
1027
+ """
1028
+ Creates a `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`.
1029
+ """
1030
+ dataset = dataloader.dataset
1031
+ sampler_is_batch_sampler = False
1032
+ if isinstance(dataset, IterableDataset):
1033
+ new_batch_sampler = None
1034
+ else:
1035
+ sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)
1036
+ batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler
1037
+ new_batch_sampler = SkipBatchSampler(batch_sampler, skip_batches=num_batches)
1038
+
1039
+ # We ignore all of those since they are all dealt with by our new_batch_sampler
1040
+ ignore_kwargs = [
1041
+ "batch_size",
1042
+ "shuffle",
1043
+ "sampler",
1044
+ "batch_sampler",
1045
+ "drop_last",
1046
+ ]
1047
+
1048
+ kwargs = {
1049
+ k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k])
1050
+ for k in _PYTORCH_DATALOADER_KWARGS
1051
+ if k not in ignore_kwargs
1052
+ }
1053
+
1054
+ # Need to provide batch_size as batch_sampler is None for Iterable dataset
1055
+ if new_batch_sampler is None:
1056
+ kwargs["drop_last"] = dataloader.drop_last
1057
+ kwargs["batch_size"] = dataloader.batch_size
1058
+
1059
+ if isinstance(dataloader, DataLoaderDispatcher):
1060
+ if new_batch_sampler is None:
1061
+ # Need to manually skip batches in the dataloader
1062
+ kwargs["skip_batches"] = num_batches
1063
+ dataloader = DataLoaderDispatcher(
1064
+ dataset,
1065
+ split_batches=dataloader.split_batches,
1066
+ batch_sampler=new_batch_sampler,
1067
+ _drop_last=dataloader._drop_last,
1068
+ **kwargs,
1069
+ )
1070
+ elif isinstance(dataloader, DataLoaderShard):
1071
+ if new_batch_sampler is None:
1072
+ # Need to manually skip batches in the dataloader
1073
+ kwargs["skip_batches"] = num_batches
1074
+ elif sampler_is_batch_sampler:
1075
+ kwargs["sampler"] = new_batch_sampler
1076
+ kwargs["batch_size"] = dataloader.batch_size
1077
+ else:
1078
+ kwargs["batch_sampler"] = new_batch_sampler
1079
+ dataloader = DataLoaderShard(
1080
+ dataset,
1081
+ device=dataloader.device,
1082
+ rng_types=dataloader.rng_types,
1083
+ synchronized_generator=dataloader.synchronized_generator,
1084
+ **kwargs,
1085
+ )
1086
+ else:
1087
+ if new_batch_sampler is None:
1088
+ # Need to manually skip batches in the dataloader
1089
+ dataloader = SkipDataLoader(dataset, skip_batches=num_batches, **kwargs)
1090
+ else:
1091
+ dataloader = DataLoader(dataset, batch_sampler=new_batch_sampler, **kwargs)
1092
+
1093
+ return dataloader
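A minimal sketch of `skip_first_batches` for resuming a run mid-epoch; the dataset and batch size are illustrative:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate.data_loader import skip_first_batches

# Resume mid-epoch: skip the 3 batches already seen before a checkpoint.
dataset = TensorDataset(torch.arange(100.0))
dataloader = DataLoader(dataset, batch_size=10, shuffle=False)

resumed = skip_first_batches(dataloader, num_batches=3)
(first,) = next(iter(resumed))
print(first)  # tensor([30., 31., ..., 39.]) -- the 4th batch of the original
```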
env-llmeval/lib/python3.10/site-packages/accelerate/hooks.py ADDED
@@ -0,0 +1,709 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import functools
16
+ from typing import Dict, List, Mapping, Optional, Union
17
+
18
+ import torch
19
+ import torch.nn as nn
20
+
21
+ from .state import PartialState
22
+ from .utils import (
23
+ PrefixedDataset,
24
+ find_device,
25
+ named_module_tensors,
26
+ send_to_device,
27
+ set_module_tensor_to_device,
28
+ )
29
+ from .utils.modeling import get_non_persistent_buffers
30
+ from .utils.other import recursive_getattr
31
+
32
+
33
+ class ModelHook:
34
+ """
35
+ A hook that contains callbacks to be executed just before and after the forward method of a model. The difference
36
+ from existing PyTorch hooks is that these get passed along the kwargs.
37
+
38
+ Class attribute:
39
+ - **no_grad** (`bool`, *optional*, defaults to `False`) -- Whether or not to execute the actual forward pass under
40
+ the `torch.no_grad()` context manager.
41
+ """
42
+
43
+ no_grad = False
44
+
45
+ def init_hook(self, module):
46
+ """
47
+ To be executed when the hook is attached to the module.
48
+
49
+ Args:
50
+ module (`torch.nn.Module`): The module attached to this hook.
51
+ """
52
+ return module
53
+
54
+ def pre_forward(self, module, *args, **kwargs):
55
+ """
56
+ To be executed just before the forward method of the model.
57
+
58
+ Args:
59
+ module (`torch.nn.Module`): The module whose forward pass will be executed just after this event.
60
+ args (`Tuple[Any]`): The positional arguments passed to the module.
61
+ kwargs (`Dict[Str, Any]`): The keyword arguments passed to the module.
62
+
63
+ Returns:
64
+ `Tuple[Tuple[Any], Dict[Str, Any]]`: A tuple with the treated `args` and `kwargs`.
65
+ """
66
+ return args, kwargs
67
+
68
+ def post_forward(self, module, output):
69
+ """
70
+ To be executed just after the forward method of the model.
71
+
72
+ Args:
73
+ module (`torch.nn.Module`): The module whose forward pass has been executed just before this event.
74
+ output (`Any`): The output of the module.
75
+
76
+ Returns:
77
+ `Any`: The processed `output`.
78
+ """
79
+ return output
80
+
81
+ def detach_hook(self, module):
82
+ """
83
+ To be executed when the hook is detached from a module.
84
+
85
+ Args:
86
+ module (`torch.nn.Module`): The module detached from this hook.
87
+ """
88
+ return module
89
+
90
+
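A minimal sketch of a custom hook: `DebugHook` below is a hypothetical subclass that logs tensor shapes around a module's forward pass, attached with `add_hook_to_module` (defined later in this file):

```python
import torch
import torch.nn as nn

from accelerate.hooks import ModelHook, add_hook_to_module

# Hypothetical hook that logs tensor shapes around the forward pass.
class DebugHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        print(f"{module.__class__.__name__} input: {tuple(args[0].shape)}")
        return args, kwargs

    def post_forward(self, module, output):
        print(f"{module.__class__.__name__} output: {tuple(output.shape)}")
        return output

linear = nn.Linear(4, 2)
add_hook_to_module(linear, DebugHook())
linear(torch.randn(3, 4))  # prints input (3, 4) then output (3, 2)
```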
91
+ class SequentialHook(ModelHook):
92
+ """
93
+ A hook that can contain several hooks and iterates through them at each event.
94
+ """
95
+
96
+ def __init__(self, *hooks):
97
+ self.hooks = hooks
98
+
99
+ def init_hook(self, module):
100
+ for hook in self.hooks:
101
+ module = hook.init_hook(module)
102
+ return module
103
+
104
+ def pre_forward(self, module, *args, **kwargs):
105
+ for hook in self.hooks:
106
+ args, kwargs = hook.pre_forward(module, *args, **kwargs)
107
+ return args, kwargs
108
+
109
+ def post_forward(self, module, output):
110
+ for hook in self.hooks:
111
+ output = hook.post_forward(module, output)
112
+ return output
113
+
114
+ def detach_hook(self, module):
115
+ for hook in self.hooks:
116
+ module = hook.detach_hook(module)
117
+ return module
118
+
119
+
120
+ def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False):
121
+ """
122
+ Adds a hook to a given module. This will rewrite the `forward` method of the module to include the hook, to remove
123
+ this behavior and restore the original `forward` method, use `remove_hook_from_module`.
124
+
125
+ <Tip warning={true}>
126
+
127
+ If the module already contains a hook, this will replace it with the new hook passed by default. To chain two hooks
128
+ together, pass `append=True`, so it chains the current and new hook into an instance of the `SequentialHook` class.
129
+
130
+ </Tip>
131
+
132
+ Args:
133
+ module (`torch.nn.Module`):
134
+ The module to attach a hook to.
135
+ hook (`ModelHook`):
136
+ The hook to attach.
137
+ append (`bool`, *optional*, defaults to `False`):
138
+ Whether the hook should be chained with an existing one (if module already contains a hook) or not.
139
+
140
+ Returns:
141
+ `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can
142
+ be discarded).
143
+ """
144
+
145
+ if append and (getattr(module, "_hf_hook", None) is not None):
146
+ old_hook = module._hf_hook
147
+ remove_hook_from_module(module)
148
+ hook = SequentialHook(old_hook, hook)
149
+
150
+ if hasattr(module, "_hf_hook") and hasattr(module, "_old_forward"):
151
+ # If we already put some hook on this module, we replace it with the new one.
152
+ old_forward = module._old_forward
153
+ else:
154
+ old_forward = module.forward
155
+ module._old_forward = old_forward
156
+
157
+ module = hook.init_hook(module)
158
+ module._hf_hook = hook
159
+
160
+ def new_forward(module, *args, **kwargs):
161
+ args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs)
162
+ if module._hf_hook.no_grad:
163
+ with torch.no_grad():
164
+ output = module._old_forward(*args, **kwargs)
165
+ else:
166
+ output = module._old_forward(*args, **kwargs)
167
+ return module._hf_hook.post_forward(module, output)
168
+
169
+ # Overriding a GraphModuleImpl forward freezes the forward call and later modifications on the graph will fail.
170
+ # Reference: https://pytorch.slack.com/archives/C3PDTEV8E/p1705929610405409
171
+ if "GraphModuleImpl" in str(type(module)):
172
+ module.__class__.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward)
173
+ else:
174
+ module.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward)
175
+
176
+ return module
177
+
178
+
179
+ def remove_hook_from_module(module: nn.Module, recurse=False):
180
+ """
181
+ Removes any hook attached to a module via `add_hook_to_module`.
182
+
183
+ Args:
184
+ module (`torch.nn.Module`): The module to attach a hook to.
185
+ recurse (`bool`, *optional*): Whether to remove the hooks recursively.
186
+
187
+ Returns:
188
+ `torch.nn.Module`: The same module, with the hook detached (the module is modified in place, so the result can
189
+ be discarded).
190
+ """
191
+
192
+ if hasattr(module, "_hf_hook"):
193
+ module._hf_hook.detach_hook(module)
194
+ delattr(module, "_hf_hook")
195
+
196
+ if hasattr(module, "_old_forward"):
197
+ # Overriding a GraphModuleImpl forward freezes the forward call and later modifications on the graph will fail.
198
+ # Reference: https://pytorch.slack.com/archives/C3PDTEV8E/p1705929610405409
199
+ if "GraphModuleImpl" in str(type(module)):
200
+ module.__class__.forward = module._old_forward
201
+ else:
202
+ module.forward = module._old_forward
203
+ delattr(module, "_old_forward")
204
+
205
+ if recurse:
206
+ for child in module.children():
207
+ remove_hook_from_module(child, recurse)
208
+
209
+ return module
210
+
211
+
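A short sketch of the replace-versus-chain semantics of `add_hook_to_module`; `NamedHook` is a hypothetical hook used only to make the call order visible:

```python
import torch
import torch.nn as nn

from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module

# Hypothetical hook that only announces itself, to make call order visible.
class NamedHook(ModelHook):
    def __init__(self, name):
        self.name = name

    def pre_forward(self, module, *args, **kwargs):
        print(f"hook {self.name} ran")
        return args, kwargs

layer = nn.Linear(2, 2)
add_hook_to_module(layer, NamedHook("a"))
add_hook_to_module(layer, NamedHook("b"), append=True)  # SequentialHook(a, b)
layer(torch.randn(1, 2))        # prints "hook a ran" then "hook b ran"
remove_hook_from_module(layer)  # restores the original forward
layer(torch.randn(1, 2))        # prints nothing
```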
212
+ class AlignDevicesHook(ModelHook):
213
+ """
214
+ A generic `ModelHook` that ensures inputs and model weights are on the same device for the forward pass of the
215
+ associated module, potentially offloading the weights after the forward pass.
216
+
217
+ Args:
218
+ execution_device (`torch.device`, *optional*):
219
+ The device on which inputs and model weights should be placed before the forward pass.
220
+ offload (`bool`, *optional*, defaults to `False`):
221
+ Whether or not the weights should be offloaded after the forward pass.
222
+ io_same_device (`bool`, *optional*, defaults to `False`):
223
+ Whether or not the output should be placed on the same device as the input was.
224
+ weights_map (`Mapping[str, torch.Tensor]`, *optional*):
225
+ When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.
226
+ offload_buffers (`bool`, *optional*, defaults to `False`):
227
+ Whether or not to include the associated module's buffers when offloading.
228
+ place_submodules (`bool`, *optional*, defaults to `False`):
229
+ Whether to place the submodules on `execution_device` during the `init_hook` event.
230
+ """
231
+
232
+ def __init__(
233
+ self,
234
+ execution_device: Optional[Union[int, str, torch.device]] = None,
235
+ offload: bool = False,
236
+ io_same_device: bool = False,
237
+ weights_map: Optional[Mapping] = None,
238
+ offload_buffers: bool = False,
239
+ place_submodules: bool = False,
240
+ skip_keys: Optional[Union[str, List[str]]] = None,
241
+ tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
242
+ ):
243
+ self.execution_device = execution_device
244
+ self.offload = offload
245
+ self.io_same_device = io_same_device
246
+ self.weights_map = weights_map
247
+ self.offload_buffers = offload_buffers
248
+ self.place_submodules = place_submodules
249
+ self.skip_keys = skip_keys
250
+
251
+ # Will contain the input device when `io_same_device=True`.
252
+ self.input_device = None
253
+ self.param_original_devices = {}
254
+ self.buffer_original_devices = {}
255
+ self.tied_params_names = set()
256
+
257
+ # The hook pre_forward/post_forward need to have knowledge of this dictionary, as with offloading we want to avoid duplicating memory
258
+ # for tied weights already loaded on the target execution device.
259
+ self.tied_params_map = tied_params_map
260
+
261
+ def __repr__(self):
262
+ return (
263
+ f"AlignDevicesHook(execution_device={self.execution_device}, offload={self.offload}, "
264
+ f"io_same_device={self.io_same_device}, offload_buffers={self.offload_buffers}, "
265
+ f"place_submodules={self.place_submodules}, skip_keys={repr(self.skip_keys)})"
266
+ )
267
+
268
+ def init_hook(self, module):
269
+ # In case the AlignDevicesHook is on meta device, ignore tied weights as data_ptr() is then always zero.
270
+ if self.execution_device == "meta" or self.execution_device == torch.device("meta"):
271
+ self.tied_params_map = None
272
+
273
+ if not self.offload and self.execution_device is not None:
274
+ for name, _ in named_module_tensors(module, recurse=self.place_submodules):
275
+ set_module_tensor_to_device(module, name, self.execution_device, tied_params_map=self.tied_params_map)
276
+ elif self.offload:
277
+ self.original_devices = {
278
+ name: param.device for name, param in named_module_tensors(module, recurse=self.place_submodules)
279
+ }
280
+ if self.weights_map is None:
281
+ self.weights_map = {
282
+ name: param.to("cpu")
283
+ for name, param in named_module_tensors(
284
+ module, include_buffers=self.offload_buffers, recurse=self.place_submodules
285
+ )
286
+ }
287
+ for name, _ in named_module_tensors(
288
+ module, include_buffers=self.offload_buffers, recurse=self.place_submodules, remove_non_persistent=True
289
+ ):
290
+ # When using disk offloading, we can not rely on `weights_map[name].data_ptr()` as the reference pointer,
291
+ # as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer.
292
+ # As we have no reliable way to track the shared data pointer of tied weights in this case, we use tied_params_names: List[str]
293
+ # to add on the fly pointers to `tied_params_map` in the pre_forward call.
294
+ if (
295
+ self.tied_params_map is not None
296
+ and recursive_getattr(module, name).data_ptr() in self.tied_params_map
297
+ ):
298
+ self.tied_params_names.add(name)
299
+
300
+ set_module_tensor_to_device(module, name, "meta")
301
+
302
+ if not self.offload_buffers and self.execution_device is not None:
303
+ for name, _ in module.named_buffers(recurse=self.place_submodules):
304
+ set_module_tensor_to_device(
305
+ module, name, self.execution_device, tied_params_map=self.tied_params_map
306
+ )
307
+ elif self.offload_buffers and self.execution_device is not None:
308
+ for name in get_non_persistent_buffers(module, recurse=self.place_submodules):
309
+ set_module_tensor_to_device(
310
+ module, name, self.execution_device, tied_params_map=self.tied_params_map
311
+ )
312
+
313
+ return module
314
+
315
+ def pre_forward(self, module, *args, **kwargs):
316
+ if self.io_same_device:
317
+ self.input_device = find_device([args, kwargs])
318
+ if self.offload:
319
+ self.tied_pointers_to_remove = set()
320
+
321
+ for name, _ in named_module_tensors(
322
+ module,
323
+ include_buffers=self.offload_buffers,
324
+ recurse=self.place_submodules,
325
+ remove_non_persistent=True,
326
+ ):
327
+ fp16_statistics = None
328
+ value = self.weights_map[name]
329
+ if "weight" in name and name.replace("weight", "SCB") in self.weights_map.keys():
330
+ if value.dtype == torch.int8:
331
+ fp16_statistics = self.weights_map[name.replace("weight", "SCB")]
332
+
333
+ # In case we are using offloading with tied weights, we need to keep track of the offloaded weights
334
+ # that are loaded on device at this point, as we will need to remove them as well from the dictionary
335
+ # self.tied_params_map in order to allow the memory to be freed.
336
+ if name in self.tied_params_names and value.data_ptr() not in self.tied_params_map:
337
+ self.tied_params_map[value.data_ptr()] = {}
338
+
339
+ if (
340
+ value is not None
341
+ and self.tied_params_map is not None
342
+ and value.data_ptr() in self.tied_params_map
343
+ and self.execution_device not in self.tied_params_map[value.data_ptr()]
344
+ ):
345
+ self.tied_pointers_to_remove.add((value.data_ptr(), self.execution_device))
346
+
347
+ set_module_tensor_to_device(
348
+ module,
349
+ name,
350
+ self.execution_device,
351
+ value=value,
352
+ fp16_statistics=fp16_statistics,
353
+ tied_params_map=self.tied_params_map,
354
+ )
355
+
356
+ return send_to_device(args, self.execution_device), send_to_device(
357
+ kwargs, self.execution_device, skip_keys=self.skip_keys
358
+ )
359
+
360
+ def post_forward(self, module, output):
361
+ if self.offload:
362
+ for name, _ in named_module_tensors(
363
+ module,
364
+ include_buffers=self.offload_buffers,
365
+ recurse=self.place_submodules,
366
+ remove_non_persistent=True,
367
+ ):
368
+ set_module_tensor_to_device(module, name, "meta")
369
+ if type(module).__name__ == "Linear8bitLt":
370
+ module.state.SCB = None
371
+ module.state.CxB = None
372
+
373
+ # We may have loaded tied weights into self.tied_params_map (avoiding loading them several times in e.g. submodules): remove them from
374
+ # this dictionary to allow the garbage collector to do its job.
375
+ for value_pointer, device in self.tied_pointers_to_remove:
376
+ del self.tied_params_map[value_pointer][device]
377
+ self.tied_pointers_to_remove = set()
378
+
379
+ if self.io_same_device and self.input_device is not None:
380
+ output = send_to_device(output, self.input_device, skip_keys=self.skip_keys)
381
+
382
+ return output
383
+
384
+ def detach_hook(self, module):
385
+ if self.offload:
386
+ for name, device in self.original_devices.items():
387
+ if device != torch.device("meta"):
388
+ set_module_tensor_to_device(module, name, device, value=self.weights_map.get(name, None))
389
+ return module
390
+
391
+
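A minimal sketch of `AlignDevicesHook` with offloading on a single module; `cpu` stands in for a real execution device, and the weights round-trip through the `meta` device between forward passes:

```python
import torch
import torch.nn as nn

from accelerate.hooks import AlignDevicesHook, add_hook_to_module

# Between forward passes the weights live on the `meta` device; `init_hook`
# snapshots them into `weights_map` and `pre_forward` restores them.
layer = nn.Linear(4, 4)
add_hook_to_module(layer, AlignDevicesHook(execution_device="cpu", offload=True))
print(layer.weight.device)             # meta
out = layer(torch.randn(2, 4))         # weights materialized just for this call
print(out.shape, layer.weight.device)  # torch.Size([2, 4]), meta again
```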
392
+ def attach_execution_device_hook(
393
+ module: torch.nn.Module,
394
+ execution_device: Union[int, str, torch.device],
395
+ skip_keys: Optional[Union[str, List[str]]] = None,
396
+ preload_module_classes: Optional[List[str]] = None,
397
+ tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
398
+ ):
399
+ """
400
+ Recursively attaches `AlignDevicesHook` to all submodules of a given model to make sure they have the right
401
+ execution device.
402
+
403
+ Args:
404
+ module (`torch.nn.Module`):
405
+ The module where we want to attach the hooks.
406
+ execution_device (`int`, `str` or `torch.device`):
407
+ The device on which inputs and model weights should be placed before the forward pass.
408
+ skip_keys (`str` or `List[str]`, *optional*):
409
+ A list of keys to ignore when moving inputs or outputs between devices.
410
+ preload_module_classes (`List[str]`, *optional*):
411
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
412
+ of the forward. This should only be used for classes that have submodules which are registered but not
413
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
414
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
415
+ tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`):
416
+ A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution
417
+ device, this parameter is useful to reuse the first available pointer of a shared weight for all others,
418
+ instead of duplicating memory.
419
+ """
420
+ if not hasattr(module, "_hf_hook") and len(module.state_dict()) > 0:
421
+ add_hook_to_module(
422
+ module,
423
+ AlignDevicesHook(execution_device, skip_keys=skip_keys, tied_params_map=tied_params_map),
424
+ )
425
+
426
+ # Break the recursion if we get to a preload module.
427
+ if preload_module_classes is not None and module.__class__.__name__ in preload_module_classes:
428
+ return
429
+
430
+ for child in module.children():
431
+ attach_execution_device_hook(child, execution_device, skip_keys=skip_keys, preload_module_classes=preload_module_classes, tied_params_map=tied_params_map)
432
+
433
+
434
+ def attach_align_device_hook(
435
+ module: torch.nn.Module,
436
+ execution_device: Optional[torch.device] = None,
437
+ offload: bool = False,
438
+ weights_map: Optional[Mapping] = None,
439
+ offload_buffers: bool = False,
440
+ module_name: str = "",
441
+ skip_keys: Optional[Union[str, List[str]]] = None,
442
+ preload_module_classes: Optional[List[str]] = None,
443
+ tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
444
+ ):
445
+ """
446
+ Recursively attaches `AlignDevicesHook` to all submodules of a given model that have direct parameters and/or
447
+ buffers.
448
+
449
+ Args:
450
+ module (`torch.nn.Module`):
451
+ The module where we want to attach the hooks.
452
+ execution_device (`torch.device`, *optional*):
453
+ The device on which inputs and model weights should be placed before the forward pass.
454
+ offload (`bool`, *optional*, defaults to `False`):
455
+ Whether or not the weights should be offloaded after the forward pass.
456
+ weights_map (`Mapping[str, torch.Tensor]`, *optional*):
457
+ When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.
458
+ offload_buffers (`bool`, *optional*, defaults to `False`):
459
+ Whether or not to include the associated module's buffers when offloading.
460
+ module_name (`str`, *optional*, defaults to `""`):
461
+ The name of the module.
462
+ skip_keys (`str` or `List[str]`, *optional*):
463
+ A list of keys to ignore when moving inputs or outputs between devices.
464
+ preload_module_classes (`List[str]`, *optional*):
465
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
466
+ of the forward. This should only be used for classes that have submodules which are registered but not
467
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
468
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
469
+ tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`):
470
+ A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution
471
+ device, this parameter is useful to reuse the first available pointer of a shared weight for all others,
472
+ instead of duplicating memory.
473
+ """
474
+ # Attach the hook on this module if it has any direct tensor.
475
+ directs = named_module_tensors(module)
476
+ full_offload = (
477
+ offload and preload_module_classes is not None and module.__class__.__name__ in preload_module_classes
478
+ )
479
+
480
+ if len(list(directs)) > 0 or full_offload:
481
+ if weights_map is not None:
482
+ prefix = f"{module_name}." if len(module_name) > 0 else ""
483
+ prefixed_weights_map = PrefixedDataset(weights_map, prefix)
484
+ else:
485
+ prefixed_weights_map = None
486
+ hook = AlignDevicesHook(
487
+ execution_device=execution_device,
488
+ offload=offload,
489
+ weights_map=prefixed_weights_map,
490
+ offload_buffers=offload_buffers,
491
+ place_submodules=full_offload,
492
+ skip_keys=skip_keys,
493
+ tied_params_map=tied_params_map,
494
+ )
495
+ add_hook_to_module(module, hook, append=True)
496
+
497
+ # We stop the recursion in case we hit the full offload.
498
+ if full_offload:
499
+ return
500
+
501
+ # Recurse on all children of the module.
502
+ for child_name, child in module.named_children():
503
+ child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name
504
+ attach_align_device_hook(
505
+ child,
506
+ execution_device=execution_device,
507
+ offload=offload,
508
+ weights_map=weights_map,
509
+ offload_buffers=offload_buffers,
510
+ module_name=child_name,
511
+ preload_module_classes=preload_module_classes,
512
+ skip_keys=skip_keys,
513
+ tied_params_map=tied_params_map,
514
+ )
515
+
516
+
517
+ def remove_hook_from_submodules(module: nn.Module):
518
+ """
519
+ Recursively removes all hooks attached on the submodules of a given model.
520
+
521
+ Args:
522
+ module (`torch.nn.Module`): The module on which to remove all hooks.
523
+ """
524
+ remove_hook_from_module(module)
525
+ for child in module.children():
526
+ remove_hook_from_submodules(child)
527
+
528
+
529
+ def attach_align_device_hook_on_blocks(
530
+ module: nn.Module,
531
+ execution_device: Optional[Union[torch.device, Dict[str, torch.device]]] = None,
532
+ offload: Union[bool, Dict[str, bool]] = False,
533
+ weights_map: Mapping = None,
534
+ offload_buffers: bool = False,
535
+ module_name: str = "",
536
+ skip_keys: Optional[Union[str, List[str]]] = None,
537
+ preload_module_classes: Optional[List[str]] = None,
538
+ tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
539
+ ):
540
+ """
541
+ Attaches `AlignDevicesHook` to all blocks of a given model as needed.
542
+
543
+ Args:
544
+ module (`torch.nn.Module`):
545
+ The module where we want to attach the hooks.
546
+ execution_device (`torch.device` or `Dict[str, torch.device]`, *optional*):
547
+ The device on which inputs and model weights should be placed before the forward pass. It can be one device
548
+ for the whole module, or a dictionary mapping module name to device.
549
+ offload (`bool`, *optional*, defaults to `False`):
550
+ Whether or not the weights should be offloaded after the forward pass. It can be one boolean for the whole
551
+ module, or a dictionary mapping module name to boolean.
552
+ weights_map (`Mapping[str, torch.Tensor]`, *optional*):
553
+ When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.
554
+ offload_buffers (`bool`, *optional*, defaults to `False`):
555
+ Whether or not to include the associated module's buffers when offloading.
556
+ module_name (`str`, *optional*, defaults to `""`):
557
+ The name of the module.
558
+ skip_keys (`str` or `List[str]`, *optional*):
559
+ A list of keys to ignore when moving inputs or outputs between devices.
560
+ preload_module_classes (`List[str]`, *optional*):
561
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
562
+ of the forward. This should only be used for classes that have submodules which are registered but not
563
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
564
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
565
+ tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`):
566
+ A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution
567
+ device, this parameter is useful to reuse the first available pointer of a shared weight for all others,
568
+ instead of duplicating memory.
569
+ """
570
+ # If one device and one offload, we've got one hook.
571
+ if not isinstance(execution_device, Mapping) and not isinstance(offload, dict):
572
+ if not offload:
573
+ hook = AlignDevicesHook(
574
+ execution_device=execution_device,
575
+ io_same_device=True,
576
+ skip_keys=skip_keys,
577
+ place_submodules=True,
578
+ tied_params_map=tied_params_map,
579
+ )
580
+ add_hook_to_module(module, hook)
581
+ else:
582
+ attach_align_device_hook(
583
+ module,
584
+ execution_device=execution_device,
585
+ offload=True,
586
+ weights_map=weights_map,
587
+ offload_buffers=offload_buffers,
588
+ module_name=module_name,
589
+ skip_keys=skip_keys,
590
+ tied_params_map=tied_params_map,
591
+ )
592
+ return
593
+
594
+ if not isinstance(execution_device, Mapping):
595
+ execution_device = {key: execution_device for key in offload.keys()}
596
+ if not isinstance(offload, Mapping):
597
+ offload = {key: offload for key in execution_device.keys()}
598
+
599
+ if module_name in execution_device and module_name in offload and not offload[module_name]:
600
+ hook = AlignDevicesHook(
601
+ execution_device=execution_device[module_name],
602
+ offload_buffers=offload_buffers,
603
+ io_same_device=(module_name == ""),
604
+ place_submodules=True,
605
+ skip_keys=skip_keys,
606
+ tied_params_map=tied_params_map,
607
+ )
608
+ add_hook_to_module(module, hook)
609
+ attach_execution_device_hook(module, execution_device[module_name], tied_params_map=tied_params_map)
610
+ elif module_name in execution_device and module_name in offload:
611
+ attach_align_device_hook(
612
+ module,
613
+ execution_device=execution_device[module_name],
614
+ offload=True,
615
+ weights_map=weights_map,
616
+ offload_buffers=offload_buffers,
617
+ module_name=module_name,
618
+ skip_keys=skip_keys,
619
+ preload_module_classes=preload_module_classes,
620
+ tied_params_map=tied_params_map,
621
+ )
622
+ if not hasattr(module, "_hf_hook"):
623
+ hook = AlignDevicesHook(
624
+ execution_device=execution_device[module_name],
625
+ io_same_device=(module_name == ""),
626
+ skip_keys=skip_keys,
627
+ tied_params_map=tied_params_map,
628
+ )
629
+ add_hook_to_module(module, hook)
630
+ attach_execution_device_hook(
631
+ module,
632
+ execution_device[module_name],
633
+ preload_module_classes=preload_module_classes,
634
+ skip_keys=skip_keys,
635
+ tied_params_map=tied_params_map,
636
+ )
637
+ elif module_name == "":
638
+ hook = AlignDevicesHook(
639
+ execution_device=execution_device.get(""),
640
+ io_same_device=True,
641
+ skip_keys=skip_keys,
642
+ tied_params_map=tied_params_map,
643
+ )
644
+ add_hook_to_module(module, hook)
645
+
646
+ for child_name, child in module.named_children():
647
+ child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name
648
+ attach_align_device_hook_on_blocks(
649
+ child,
650
+ execution_device=execution_device,
651
+ offload=offload,
652
+ weights_map=weights_map,
653
+ offload_buffers=offload_buffers,
654
+ module_name=child_name,
655
+ preload_module_classes=preload_module_classes,
656
+ skip_keys=skip_keys,
657
+ tied_params_map=tied_params_map,
658
+ )
659
+
660
+
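A minimal sketch of per-block placement with `attach_align_device_hook_on_blocks`; the name-to-device maps are illustrative (a real setup would map block names to GPU indices, e.g. `{"0": 0, "1": 1}`):

```python
import torch
import torch.nn as nn

from accelerate.hooks import attach_align_device_hook_on_blocks

# Two blocks pinned via name -> device maps; "cpu" is illustrative only.
model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 4))
attach_align_device_hook_on_blocks(
    model,
    execution_device={"0": "cpu", "1": "cpu"},
    offload={"0": False, "1": False},
)
print(model(torch.randn(2, 4)).shape)  # inputs follow the blocks' devices
```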
661
+ class CpuOffload(ModelHook):
662
+ """
663
+ Offloads a model on the CPU until its forward pass is called. The model will not be offloaded back to the CPU after
664
+ the forward pass; the user needs to call the `init_hook` method again for that.
665
+
666
+ Args:
667
+ execution_device(`str`, `int` or `torch.device`, *optional*):
668
+ The device on which the model should be executed. Will default to the MPS device if it's available, then
669
+ GPU 0 if there is a GPU, and finally to the CPU.
670
+ prev_module_hook (`UserCpuOffloadHook`, *optional*):
671
+ The hook sent back by [`cpu_offload_with_hook`] for a previous model in the pipeline you are running. If
672
+ passed, its offload method will be called just before the forward of the model to which this hook is
673
+ attached.
674
+ """
675
+
676
+ def __init__(
677
+ self,
678
+ execution_device: Optional[Union[str, int, torch.device]] = None,
679
+ prev_module_hook: Optional["UserCpuOffloadHook"] = None,
680
+ ):
681
+ self.prev_module_hook = prev_module_hook
682
+
683
+ self.execution_device = execution_device if execution_device is not None else PartialState().default_device
684
+
685
+ def init_hook(self, module):
686
+ return module.to("cpu")
687
+
688
+ def pre_forward(self, module, *args, **kwargs):
689
+ if self.prev_module_hook is not None:
690
+ self.prev_module_hook.offload()
691
+ module.to(self.execution_device)
692
+ return send_to_device(args, self.execution_device), send_to_device(kwargs, self.execution_device)
693
+
694
+
695
+ class UserCpuOffloadHook:
696
+ """
697
+ A simple hook grouping a model and a `ModelHook`, which provides easy APIs to call the init method of the hook
698
+ or remove it entirely.
699
+ """
700
+
701
+ def __init__(self, model, hook):
702
+ self.model = model
703
+ self.hook = hook
704
+
705
+ def offload(self):
706
+ self.hook.init_hook(self.model)
707
+
708
+ def remove(self):
709
+ remove_hook_from_module(self.model)
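A minimal sketch wiring `CpuOffload` and `UserCpuOffloadHook` together by hand for a two-model pipeline (essentially what the `cpu_offload_with_hook` helper referenced in the docstring does); `cpu` stands in for a real accelerator device:

```python
import torch
import torch.nn as nn

from accelerate.hooks import CpuOffload, UserCpuOffloadHook, add_hook_to_module

# Each model is moved to its execution device on forward; the previous
# model's offload() runs first, keeping at most one model resident.
model_a, model_b = nn.Linear(4, 4), nn.Linear(4, 4)

hook_a = CpuOffload(execution_device="cpu")
add_hook_to_module(model_a, hook_a)
user_hook_a = UserCpuOffloadHook(model_a, hook_a)

hook_b = CpuOffload(execution_device="cpu", prev_module_hook=user_hook_a)
add_hook_to_module(model_b, hook_b)

x = model_a(torch.randn(2, 4))
y = model_b(x)  # offloads model_a back to the CPU before running model_b
```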
env-llmeval/lib/python3.10/site-packages/accelerate/inference.py ADDED
@@ -0,0 +1,188 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import math
15
+ from types import MethodType
16
+ from typing import Any, Dict, List, Optional, Tuple, Union
17
+
18
+ from .state import PartialState
19
+ from .utils import (
20
+ calculate_maximum_sizes,
21
+ convert_bytes,
22
+ copy_tensor_to_devices,
23
+ ignorant_find_batch_size,
24
+ infer_auto_device_map,
25
+ is_pippy_available,
26
+ pad_input_tensors,
27
+ send_to_device,
28
+ )
29
+
30
+
31
+ if is_pippy_available():
32
+ from pippy.IR import Pipe, PipeSplitWrapper, annotate_split_points
33
+ from pippy.PipelineStage import PipelineStage
34
+
35
+
36
+ def generate_device_map(model, num_processes: int = 1, no_split_module_classes=None, max_memory: dict = None):
37
+ """
38
+ Calculates the device map for `model` with an offset for PiPPy
39
+ """
40
+ if num_processes == 1:
41
+ return infer_auto_device_map(model, no_split_module_classes=no_split_module_classes, clean_result=False)
42
+ if max_memory is None:
43
+ model_size, shared = calculate_maximum_sizes(model)
44
+
45
+ # Split into `n` chunks for each GPU
46
+ memory = (model_size + shared[0]) / num_processes
47
+ memory = convert_bytes(memory)
48
+ value, ending = memory.split(" ")
49
+
50
+ # Add a chunk to deal with potential extra shared memory instances
51
+ memory = math.ceil(float(value)) * 1.1
52
+ memory = f"{memory} {ending}"
53
+ max_memory = {i: memory for i in range(num_processes)}
54
+ device_map = infer_auto_device_map(
55
+ model,
56
+ max_memory=max_memory,
57
+ no_split_module_classes=no_split_module_classes,
58
+ clean_result=False,
59
+ )
60
+ return device_map
61
+
62
+
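A worked example of the per-process memory budget computed in `generate_device_map`, with illustrative sizes and a manual GiB conversion standing in for `convert_bytes`:

```python
import math

# Illustrative sizes: a 10 GiB model with 0.5 GiB of shared weights split
# across 4 processes.
model_size, shared = 10 * 1024**3, (0.5 * 1024**3,)
num_processes = 4

memory = (model_size + shared[0]) / num_processes  # bytes per process
value = memory / 1024**3                           # ~2.62 GiB
budget = math.ceil(value) * 1.1                    # head room for extra shared copies
max_memory = {i: f"{budget} GB" for i in range(num_processes)}
print(max_memory)  # every process gets a ~3.3 GB budget
```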
63
+ def find_pippy_batch_size(args, kwargs):
64
+ found_batch_size = None
65
+ if args is not None:
66
+ for arg in args:
67
+ found_batch_size = ignorant_find_batch_size(arg)
68
+ if found_batch_size is not None:
69
+ break
70
+ if kwargs is not None and found_batch_size is None:
71
+ for kwarg in kwargs.values():
72
+ found_batch_size = ignorant_find_batch_size(kwarg)
73
+ if found_batch_size is not None:
74
+ break
75
+ return found_batch_size
76
+
77
+
78
+ def build_pipeline(model, split_points, args, kwargs, num_chunks):
79
+ """
80
+ Attaches the split points to the model based on `self.device_map` and generates a `PipelineStage`. Requires passing
81
+ in the `args` and `kwargs` that the model needs, placed on the CPU.
82
+
83
+ Users can pass in a custom `num_chunks` as an optional hyperparameter. By default it will use
84
+ `AcceleratorState.num_processes`
85
+ """
86
+ # We need to annotate the split points in the model for PiPPy
87
+ state = PartialState()
88
+ annotate_split_points(model, {split_point: PipeSplitWrapper.SplitPoint.BEGINNING for split_point in split_points})
89
+ found_batch_size = find_pippy_batch_size(args, kwargs)
90
+ if found_batch_size != num_chunks:
91
+ if args is not None:
92
+ args = pad_input_tensors(args, found_batch_size, num_chunks)
93
+ if kwargs is not None:
94
+ kwargs = pad_input_tensors(kwargs, found_batch_size, num_chunks)
95
+ pipe = Pipe.from_tracing(model, num_chunks=num_chunks, example_args=args, example_kwargs=kwargs)
96
+ stage = PipelineStage(pipe, state.local_process_index, device=state.device)
97
+
98
+ return stage
99
+
100
+
101
+ def pippy_forward(forward, num_chunks, gather_output, *args, **kwargs):
102
+ state = PartialState()
103
+ output = None
104
+
105
+ if state.num_processes == 1:
106
+ output = forward(*args, **kwargs)
107
+ elif state.is_local_main_process:
108
+ found_batch_size = find_pippy_batch_size(args, kwargs)
109
+ if found_batch_size is None:
110
+ raise ValueError("Could not find batch size from args or kwargs")
111
+ else:
112
+ if found_batch_size != num_chunks:
113
+ args = pad_input_tensors(args, found_batch_size, num_chunks)
114
+ kwargs = pad_input_tensors(kwargs, found_batch_size, num_chunks)
115
+ forward(*args, **kwargs)
116
+ elif state.is_last_process:
117
+ output = forward()
118
+ else:
119
+ forward()
120
+ if gather_output:
121
+ # Each node will get a copy of the full output which is only on the last GPU
122
+ output = copy_tensor_to_devices(output)
123
+ return output
124
+
125
+
126
+ def prepare_pippy(
127
+ model,
128
+ split_points: Optional[Union[str, List[str]]] = "auto",
129
+ no_split_module_classes: Optional[List[str]] = None,
130
+ example_args: Optional[Tuple[Any]] = (),
131
+ example_kwargs: Optional[Dict[str, Any]] = None,
132
+ num_chunks: Optional[int] = None,
133
+ gather_output: Optional[bool] = False,
134
+ ):
135
+ """
136
+ Wraps `model` for pipeline parallel inference.
137
+
138
+ Args:
139
+ model (`torch.nn.Module`):
140
+ A model we want to split for pipeline-parallel inference
141
+ split_points (`str` or `List[str]`, defaults to 'auto'):
142
+ How to generate the split points and chunk the model across each GPU. 'auto' will find the best balanced
143
+ split given any model. Otherwise, it should be a list of layer names in the model to split by.
144
+ no_split_module_classes (`List[str]`):
145
+ A list of class names for layers we don't want to be split.
146
+ example_args (tuple of model inputs):
147
+ The expected inputs for the model that uses order-based inputs. It is recommended to use this format when possible.
148
+ example_kwargs (dict of model inputs):
149
+ The expected inputs for the model that uses dictionary-based inputs. This is a *highly* limiting structure
150
+ that requires the same keys be present at *all* inference calls. Not recommended unless the prior condition
151
+ is true for all cases.
152
+ num_chunks (`int`, defaults to the number of available GPUs):
153
+ The number of different stages the Pipeline will have. By default it will assign one chunk per GPU, but
154
+ this can be tuned and played with. In general one should have num_chunks >= num_gpus.
155
+ gather_output (`bool`, defaults to `False`):
156
+ If `True`, the output from the last GPU (which holds the true outputs) is sent across to all GPUs.
157
+ """
158
+ if not is_pippy_available():
159
+ raise ImportError(
160
+ "`pippy` was not found to be installed on your system. Please "
161
+ "install using `pip install torchpippy` or ensure you have at least version 0.2.0"
162
+ )
163
+ state = PartialState()
164
+ example_args = send_to_device(example_args, "cpu")
165
+ example_kwargs = send_to_device(example_kwargs, "cpu")
166
+ if num_chunks is None:
167
+ num_chunks = state.num_processes
168
+ if split_points == "auto":
169
+ device_map = generate_device_map(model, num_chunks, no_split_module_classes=no_split_module_classes)
170
+ split_points = []
171
+ for i in range(1, num_chunks):
172
+ split_points.append(next(k for k, v in device_map.items() if v == i))
173
+ model.hf_split_points = split_points
174
+ stage = build_pipeline(model, split_points, example_args, example_kwargs, num_chunks)
175
+ model._original_forward = model.forward
176
+ model._original_call = model.__call__
177
+ model.pippy_stage = stage
178
+ model.hf_split_points = split_points
179
+
180
+ def forward(*args, **kwargs):
181
+ return pippy_forward(stage.forward, num_chunks, gather_output, *args, **kwargs)
182
+
183
+ # To act like a decorator so that it can be popped when doing `extract_model_from_parallel`
184
+ # Note: creates an infinite recursion loop with `generate`
185
+ model_forward = MethodType(forward, model)
186
+ forward.__wrapped__ = model_forward
187
+ model.forward = forward
188
+ return model
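A minimal usage sketch for `prepare_pippy`, meant to be started with `accelerate launch` on two or more GPUs; the toy model and input sizes are placeholders for a real transformer:

```python
import torch
import torch.nn as nn

from accelerate.inference import prepare_pippy

# The batch size should be divisible by the number of chunks (the number
# of processes by default) so it can be split across pipeline stages.
model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 16))
example = torch.randn(2, 16)

model = prepare_pippy(model, example_args=(example,), gather_output=True)
with torch.no_grad():
    output = model(example)  # with gather_output, available on every process
```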