diff --git a/.gitattributes b/.gitattributes
index 85b524b66e955770ea324a3065d759ab7e13c171..a5b0d24782bc11766ea2ea248df63cf7c0045a86 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -115,3 +115,5 @@ llmeval-env/lib/python3.10/site-packages/torch/lib/libc10.so filter=lfs diff=lfs
llmeval-env/lib/python3.10/site-packages/torch/lib/libtorch_python.so filter=lfs diff=lfs merge=lfs -text
llmeval-env/lib/python3.10/site-packages/torch/lib/libcusparseLt-f80c68d1.so.0 filter=lfs diff=lfs merge=lfs -text
llmeval-env/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so filter=lfs diff=lfs merge=lfs -text
+env-llmeval/bin/python3 filter=lfs diff=lfs merge=lfs -text
+env-llmeval/bin/python3.10 filter=lfs diff=lfs merge=lfs -text
diff --git a/env-llmeval/bin/f2py b/env-llmeval/bin/f2py
new file mode 100644
index 0000000000000000000000000000000000000000..6e07abc375e5938ffdd3257ea4769ec9187c354e
--- /dev/null
+++ b/env-llmeval/bin/f2py
@@ -0,0 +1,8 @@
+#!/home/sdp/llm_eval/env-llmeval/bin/python3
+# -*- coding: utf-8 -*-
+import re
+import sys
+from numpy.f2py.f2py2e import main
+if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+ sys.exit(main())
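The `f2py` file above is the kind of console-script shim pip writes for NumPy's `f2py` entry point: it normalizes `sys.argv[0]` (dropping a `-script.pyw` or `.exe` suffix that Windows launchers append) and then delegates to `numpy.f2py.f2py2e.main`. A minimal sketch of that argv normalization, for illustration only (the `normalize_argv0` helper is hypothetical, not part of the diff):

```python
# Illustrative sketch of the argv[0] clean-up done by the generated wrapper above.
import re

def normalize_argv0(argv0: str) -> str:
    # Strip a trailing "-script.pyw" or ".exe" so the tool sees a clean program name.
    return re.sub(r'(-script\.pyw|\.exe)?$', '', argv0)

assert normalize_argv0("f2py.exe") == "f2py"
assert normalize_argv0("f2py-script.pyw") == "f2py"
assert normalize_argv0("f2py") == "f2py"
```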
diff --git a/env-llmeval/bin/python3 b/env-llmeval/bin/python3
new file mode 100644
index 0000000000000000000000000000000000000000..64e8728adfafdb3e95b983fb0960f9a09cd3bac9
--- /dev/null
+++ b/env-llmeval/bin/python3
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45692c3da2492563eabf0a8f5dc18d20dc9c34ffe3a18202563e00bae684be91
+size 5904904
diff --git a/env-llmeval/bin/python3.10 b/env-llmeval/bin/python3.10
new file mode 100644
index 0000000000000000000000000000000000000000..64e8728adfafdb3e95b983fb0960f9a09cd3bac9
--- /dev/null
+++ b/env-llmeval/bin/python3.10
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45692c3da2492563eabf0a8f5dc18d20dc9c34ffe3a18202563e00bae684be91
+size 5904904
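The `python3` and `python3.10` entries above are not the interpreter binaries themselves but Git LFS pointer files (spec v1): a version URL, the SHA-256 `oid` of the real object, and its `size` in bytes; the new `.gitattributes` rules at the top of the diff are what route these paths through the LFS filter. A small illustrative sketch (the `read_lfs_pointer` helper is hypothetical) that parses such a pointer:

```python
# Illustrative only: split a Git LFS v1 pointer file, like the two committed above,
# into its version / oid / size fields.
from pathlib import Path

def read_lfs_pointer(path: str) -> dict:
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return {
        "version": fields["version"],                     # LFS spec URL
        "sha256": fields["oid"].removeprefix("sha256:"),  # hash of the real blob
        "size": int(fields["size"]),                      # size in bytes
    }

# e.g. read_lfs_pointer("env-llmeval/bin/python3")["size"] -> 5904904
```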
diff --git a/env-llmeval/lib/python3.10/site-packages/__editable___lm_eval_0_4_2_finder.py b/env-llmeval/lib/python3.10/site-packages/__editable___lm_eval_0_4_2_finder.py
new file mode 100644
index 0000000000000000000000000000000000000000..05a04d78a98ec64e709f2afe812bbf40a84191d7
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/__editable___lm_eval_0_4_2_finder.py
@@ -0,0 +1,79 @@
+import sys
+from importlib.machinery import ModuleSpec, PathFinder
+from importlib.machinery import all_suffixes as module_suffixes
+from importlib.util import spec_from_file_location
+from itertools import chain
+from pathlib import Path
+
+MAPPING = {'lm_eval': '/home/sdp/llm_eval/lm-evaluation/lm_eval'}
+NAMESPACES = {'lm_eval.caching': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/caching'], 'lm_eval.tasks.agieval': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/agieval'], 'lm_eval.tasks.openbookqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/openbookqa'], 'lm_eval.tasks.aexams': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/aexams'], 'lm_eval.tasks.wmdp': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/wmdp'], 'lm_eval.tasks.blimp': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/blimp'], 'lm_eval.tasks.swag': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/swag'], 'lm_eval.tasks.bigbench': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/bigbench'], 'lm_eval.tasks.lambada': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/lambada'], 'lm_eval.tasks.hellaswag': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/hellaswag'], 'lm_eval.tasks.mgsm': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mgsm'], 'lm_eval.tasks.xwinograd': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/xwinograd'], 'lm_eval.tasks.tmmluplus': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/tmmluplus'], 'lm_eval.tasks.babi': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/babi'], 'lm_eval.tasks.xstorycloze': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/xstorycloze'], 'lm_eval.tasks.haerae': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/haerae'], 'lm_eval.tasks.model_written_evals': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/model_written_evals'], 'lm_eval.tasks.kmmlu': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/kmmlu'], 'lm_eval.tasks.arithmetic': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/arithmetic'], 'lm_eval.tasks.gsm8k': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/gsm8k'], 'lm_eval.tasks.prost': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/prost'], 'lm_eval.tasks.basqueglue': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/basqueglue'], 'lm_eval.tasks.drop': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/drop'], 'lm_eval.tasks.french_bench': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/french_bench'], 'lm_eval.tasks.race': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/race'], 'lm_eval.tasks.medmcqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/medmcqa'], 'lm_eval.tasks.eus_exams': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/eus_exams'], 'lm_eval.tasks.scrolls': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/scrolls'], 'lm_eval.tasks.arc': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/arc'], 'lm_eval.tasks.eus_proficiency': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/eus_proficiency'], 'lm_eval.tasks.bbh': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/bbh'], 'lm_eval.tasks.pile': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/pile'], 'lm_eval.tasks.headqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/headqa'], 'lm_eval.tasks.kobest': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/kobest'], 'lm_eval.tasks.wsc273': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/wsc273'], 'lm_eval.tasks.siqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/siqa'], 'lm_eval.tasks.sciq': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/sciq'], 'lm_eval.tasks.wmt2016': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/wmt2016'], 'lm_eval.tasks.wikitext': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/wikitext'], 'lm_eval.tasks.minerva_math': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/minerva_math'], 'lm_eval.tasks.paws-x': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/paws-x'], 'lm_eval.tasks.lambada_multilingual': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/lambada_multilingual'], 'lm_eval.tasks.triviaqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/triviaqa'], 'lm_eval.tasks.xnli': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/xnli'], 'lm_eval.tasks.code_x_glue': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/code_x_glue'], 'lm_eval.tasks.qa4mre': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/qa4mre'], 'lm_eval.tasks.ifeval': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/ifeval'], 'lm_eval.tasks.cmmlu': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/cmmlu'], 'lm_eval.tasks.medqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/medqa'], 'lm_eval.tasks.lambada_cloze': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/lambada_cloze'], 'lm_eval.tasks.translation': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/translation'], 'lm_eval.tasks.nq_open': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/nq_open'], 'lm_eval.tasks.hendrycks_ethics': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/hendrycks_ethics'], 'lm_eval.tasks.okapi': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/okapi'], 'lm_eval.tasks.crows_pairs': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/crows_pairs'], 'lm_eval.tasks.gpqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/gpqa'], 'lm_eval.tasks.asdiv': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/asdiv'], 'lm_eval.tasks.ceval': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/ceval'], 'lm_eval.tasks.eus_trivia': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/eus_trivia'], 'lm_eval.tasks.eq_bench': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/eq_bench'], 'lm_eval.tasks.polemo2': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/polemo2'], 'lm_eval.tasks.glue': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/glue'], 'lm_eval.tasks.csatqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/csatqa'], 'lm_eval.tasks.qasper': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/qasper'], 'lm_eval.tasks.eus_reading': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/eus_reading'], 'lm_eval.tasks.logiqa2': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/logiqa2'], 'lm_eval.tasks.super_glue': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/super_glue'], 'lm_eval.tasks.aclue': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/aclue'], 'lm_eval.tasks.piqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/piqa'], 'lm_eval.tasks.mc_taco': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mc_taco'], 'lm_eval.tasks.benchmarks': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/benchmarks'], 'lm_eval.tasks.truthfulqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/truthfulqa'], 'lm_eval.tasks.logiqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/logiqa'], 'lm_eval.tasks.mmlu': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mmlu'], 'lm_eval.tasks.coqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/coqa'], 'lm_eval.tasks.squadv2': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/squadv2'], 'lm_eval.tasks.belebele': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/belebele'], 'lm_eval.tasks.fld': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/fld'], 'lm_eval.tasks.winogrande': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/winogrande'], 'lm_eval.tasks.mutual': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mutual'], 'lm_eval.tasks.webqs': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/webqs'], 'lm_eval.tasks.unscramble': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/unscramble'], 'lm_eval.tasks.realtoxicityprompts': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/realtoxicityprompts'], 'lm_eval.tasks.storycloze': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/storycloze'], 'lm_eval.tasks.anli': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/anli'], 'lm_eval.tasks.mathqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mathqa'], 'lm_eval.tasks.ammlu': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/ammlu'], 'lm_eval.tasks.pubmedqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/pubmedqa'], 'lm_eval.tasks.xcopa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/xcopa'], 'lm_eval.tasks.toxigen': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/toxigen'], 'lm_eval.tasks.kormedmcqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/kormedmcqa'], 'lm_eval.tasks.bigbench.generate_until': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/bigbench/generate_until'], 'lm_eval.tasks.bigbench.multiple_choice': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice'], 'lm_eval.tasks.mgsm.direct': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mgsm/direct'], 'lm_eval.tasks.mgsm.native_cot': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mgsm/native_cot'], 'lm_eval.tasks.mgsm.en_cot': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mgsm/en_cot'], 'lm_eval.tasks.tmmluplus.default': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/tmmluplus/default'], 'lm_eval.tasks.model_written_evals.persona': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/model_written_evals/persona'], 'lm_eval.tasks.model_written_evals.sycophancy': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/model_written_evals/sycophancy'], 'lm_eval.tasks.model_written_evals.advanced_ai_risk': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/model_written_evals/advanced_ai_risk'], 'lm_eval.tasks.model_written_evals.winogenerated': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/model_written_evals/winogenerated'], 'lm_eval.tasks.kmmlu.direct': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/kmmlu/direct'], 'lm_eval.tasks.kmmlu.direct_hard': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/kmmlu/direct_hard'], 'lm_eval.tasks.kmmlu.cot_hard': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/kmmlu/cot_hard'], 'lm_eval.tasks.kmmlu.hard': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/kmmlu/hard'], 'lm_eval.tasks.bbh.cot_fewshot': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/bbh/cot_fewshot'], 'lm_eval.tasks.bbh.fewshot': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/bbh/fewshot'], 'lm_eval.tasks.bbh.cot_zeroshot': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/bbh/cot_zeroshot'], 'lm_eval.tasks.bbh.zeroshot': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/bbh/zeroshot'], 'lm_eval.tasks.code_x_glue.code-text': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/code_x_glue/code-text'], 'lm_eval.tasks.okapi.arc_multilingual': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/okapi/arc_multilingual'], 'lm_eval.tasks.okapi.mmlu_multilingual': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/okapi/mmlu_multilingual'], 'lm_eval.tasks.okapi.hellaswag_multilingual': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/okapi/hellaswag_multilingual'], 'lm_eval.tasks.okapi.truthfulqa_multilingual': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/okapi/truthfulqa_multilingual'], 'lm_eval.tasks.gpqa.generative': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/gpqa/generative'], 'lm_eval.tasks.gpqa.n_shot': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/gpqa/n_shot'], 'lm_eval.tasks.gpqa.cot_zeroshot': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/gpqa/cot_zeroshot'], 'lm_eval.tasks.gpqa.cot_n_shot': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/gpqa/cot_n_shot'], 'lm_eval.tasks.gpqa.zeroshot': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/gpqa/zeroshot'], 'lm_eval.tasks.glue.mrpc': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/glue/mrpc'], 'lm_eval.tasks.glue.qqp': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/glue/qqp'], 'lm_eval.tasks.glue.rte': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/glue/rte'], 'lm_eval.tasks.glue.sst2': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/glue/sst2'], 'lm_eval.tasks.glue.mnli': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/glue/mnli'], 'lm_eval.tasks.glue.qnli': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/glue/qnli'], 'lm_eval.tasks.glue.cola': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/glue/cola'], 'lm_eval.tasks.glue.wnli': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/glue/wnli'], 'lm_eval.tasks.super_glue.multirc': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/super_glue/multirc'], 'lm_eval.tasks.super_glue.wic': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/super_glue/wic'], 'lm_eval.tasks.super_glue.record': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/super_glue/record'], 'lm_eval.tasks.super_glue.rte': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/super_glue/rte'], 'lm_eval.tasks.super_glue.wsc': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/super_glue/wsc'], 'lm_eval.tasks.super_glue.cb': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/super_glue/cb'], 'lm_eval.tasks.super_glue.boolq': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/super_glue/boolq'], 'lm_eval.tasks.super_glue.copa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/super_glue/copa'], 'lm_eval.tasks.benchmarks.flan': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/benchmarks/flan'], 'lm_eval.tasks.benchmarks.multimedqa': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/benchmarks/multimedqa'], 'lm_eval.tasks.mmlu.flan_cot_zeroshot': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_zeroshot'], 'lm_eval.tasks.mmlu.default': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mmlu/default'], 'lm_eval.tasks.mmlu.flan_cot_fewshot': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mmlu/flan_cot_fewshot'], 'lm_eval.tasks.mmlu.flan_n_shot': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot'], 'lm_eval.tasks.mmlu.flan_n_shot.loglikelihood': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood'], 'lm_eval.tasks.mmlu.flan_n_shot.generative': ['/home/sdp/llm_eval/lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot/generative']}
+PATH_PLACEHOLDER = '__editable__.lm_eval-0.4.2.finder' + ".__path_hook__"
+
+
+class _EditableFinder: # MetaPathFinder
+ @classmethod
+ def find_spec(cls, fullname, path=None, target=None):
+ extra_path = []
+
+ # Top-level packages and modules (we know these exist in the FS)
+ if fullname in MAPPING:
+ pkg_path = MAPPING[fullname]
+ return cls._find_spec(fullname, Path(pkg_path))
+
+ # Handle immediate children modules (required for namespaces to work)
+ # To avoid problems with case sensitivity in the file system we delegate
+ # to the importlib.machinery implementation.
+ parent, _, child = fullname.rpartition(".")
+ if parent and parent in MAPPING:
+ return PathFinder.find_spec(fullname, path=[MAPPING[parent], *extra_path])
+
+ # Other levels of nesting should be handled automatically by importlib
+ # using the parent path.
+ return None
+
+ @classmethod
+ def _find_spec(cls, fullname, candidate_path):
+ init = candidate_path / "__init__.py"
+ candidates = (candidate_path.with_suffix(x) for x in module_suffixes())
+ for candidate in chain([init], candidates):
+ if candidate.exists():
+ return spec_from_file_location(fullname, candidate)
+
+
+class _EditableNamespaceFinder: # PathEntryFinder
+ @classmethod
+ def _path_hook(cls, path):
+ if path == PATH_PLACEHOLDER:
+ return cls
+ raise ImportError
+
+ @classmethod
+ def _paths(cls, fullname):
+ # Ensure __path__ is not empty for the spec to be considered a namespace.
+ return NAMESPACES[fullname] or MAPPING.get(fullname) or [PATH_PLACEHOLDER]
+
+ @classmethod
+ def find_spec(cls, fullname, target=None):
+ if fullname in NAMESPACES:
+ spec = ModuleSpec(fullname, None, is_package=True)
+ spec.submodule_search_locations = cls._paths(fullname)
+ return spec
+ return None
+
+ @classmethod
+ def find_module(cls, fullname):
+ return None
+
+
+def install():
+ if not any(finder == _EditableFinder for finder in sys.meta_path):
+ sys.meta_path.append(_EditableFinder)
+
+ if not NAMESPACES:
+ return
+
+ if not any(hook == _EditableNamespaceFinder._path_hook for hook in sys.path_hooks):
+ # PathEntryFinder is needed to create NamespaceSpec without private APIs
+ sys.path_hooks.append(_EditableNamespaceFinder._path_hook)
+ if PATH_PLACEHOLDER not in sys.path:
+ sys.path.append(PATH_PLACEHOLDER) # Used just to trigger the path hook
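The finder module above is what setuptools generates for an editable (`pip install -e`) install of `lm_eval` 0.4.2: `install()` is expected to run at interpreter startup via a companion `.pth` file, after which `import lm_eval` resolves to the source checkout listed in `MAPPING`, and the data-only task directories in `NAMESPACES` import as namespace packages through the registered path hook. A minimal sketch of the same meta-path redirection technique, using a hypothetical package name and path rather than the generated values:

```python
# Minimal sketch of the sys.meta_path redirection used above (hypothetical
# package name and path; illustrative, not the generated code).
import sys
from importlib.util import spec_from_file_location
from pathlib import Path

MAPPING = {"my_pkg": "/tmp/checkout/my_pkg"}  # hypothetical editable checkout

class RedirectingFinder:
    @classmethod
    def find_spec(cls, fullname, path=None, target=None):
        if fullname in MAPPING:
            init = Path(MAPPING[fullname]) / "__init__.py"
            if init.exists():
                # Load the package straight from the source checkout.
                return spec_from_file_location(fullname, init)
        return None  # let the normal finders handle everything else

sys.meta_path.append(RedirectingFinder)
# After registration, `import my_pkg` loads /tmp/checkout/my_pkg/__init__.py
# even though the package does not live under site-packages.
```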
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..be25fa1ebb9ca2f241b5c35cb2508d507267fd86
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c1ba3c44d809649267be13327420d27a3746b126
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/accelerator.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/big_modeling.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/big_modeling.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1688e0f6a8146cb7cc35109f5e324ff492814a07
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/big_modeling.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/checkpointing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/checkpointing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3c85d0d66467ae259a227adae7673a94f59fa15c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/checkpointing.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/data_loader.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/data_loader.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f13d4bdd985df560a9a146c47c8b32e7d13750bc
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/data_loader.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/hooks.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/hooks.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8b24dd95347e268d0debe33a77bf32d12b652675
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/hooks.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/inference.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/inference.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f39206e4297cda94a21022eb585a52e798c61b0c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/inference.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/launchers.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/launchers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..572e7928cd850d810a2a1bb75f4fa7be02f191cf
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/launchers.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/local_sgd.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/local_sgd.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d161ced973d06743c6a8deb06b1997538c915e6a
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/local_sgd.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/logging.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/logging.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3590773b28d8c58b8cfef75fde2aa29a49b06575
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/logging.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/memory_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/memory_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bfb784b08cd110be09ce425d59b7c1c249058702
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/memory_utils.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/optimizer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/optimizer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bccd158e998d6b50014ba6eac056f4d2651658a8
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/optimizer.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/scheduler.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/scheduler.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2e9fb5c6e7f0b5835e7ccf4d05b3e83bfb71dfac
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/scheduler.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/state.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/state.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fd844af2deaff1ac63d2933d6e6d153668669980
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/state.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/tracking.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/tracking.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8012f53b3b6eee0794cdb695b5cc2dceaf9512b7
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/__pycache__/tracking.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/accelerator.py b/env-llmeval/lib/python3.10/site-packages/accelerate/accelerator.py
new file mode 100644
index 0000000000000000000000000000000000000000..4786946c6da68e12a29b5f3cd799d40e6af49c4a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/accelerator.py
@@ -0,0 +1,3259 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+import contextlib
+import functools
+import json
+import math
+import os
+import re
+import shutil
+import sys
+import warnings
+from collections import OrderedDict
+from contextlib import contextmanager
+from functools import partial
+from types import MethodType
+from typing import Any, Callable, Union
+
+import torch
+import torch.utils.hooks as hooks
+
+from .checkpointing import load_accelerator_state, load_custom_state, save_accelerator_state, save_custom_state
+from .data_loader import DataLoaderDispatcher, prepare_data_loader, skip_first_batches
+from .hooks import AlignDevicesHook
+from .logging import get_logger
+from .optimizer import AcceleratedOptimizer
+from .scheduler import AcceleratedScheduler
+from .state import AcceleratorState, GradientState, PartialState
+from .tracking import LOGGER_TYPE_TO_CLASS, GeneralTracker, filter_trackers
+from .utils import (
+ MODEL_NAME,
+ SAFE_WEIGHTS_INDEX_NAME,
+ SAFE_WEIGHTS_NAME,
+ WEIGHTS_INDEX_NAME,
+ WEIGHTS_NAME,
+ AutocastKwargs,
+ DataLoaderConfiguration,
+ DeepSpeedPlugin,
+ DistributedDataParallelKwargs,
+ DistributedType,
+ DynamoBackend,
+ FP8RecipeKwargs,
+ FullyShardedDataParallelPlugin,
+ GradientAccumulationPlugin,
+ GradScalerKwargs,
+ InitProcessGroupKwargs,
+ KwargsHandler,
+ LoggerType,
+ MegatronLMPlugin,
+ PrecisionType,
+ ProjectConfiguration,
+ RNGType,
+ TorchDynamoPlugin,
+ check_os_kernel,
+ clean_state_dict_for_safetensors,
+ compare_versions,
+ convert_model,
+ convert_outputs_to_fp32,
+ extract_model_from_parallel,
+ gather,
+ gather_object,
+ get_mixed_precision_context_manager,
+ get_pretty_name,
+ has_transformer_engine_layers,
+ is_bf16_available,
+ is_deepspeed_available,
+ is_fp8_available,
+ is_ipex_available,
+ is_megatron_lm_available,
+ is_mlu_available,
+ is_msamp_available,
+ is_npu_available,
+ is_torch_version,
+ is_torch_xla_available,
+ is_xpu_available,
+ load_fsdp_model,
+ load_fsdp_optimizer,
+ pad_across_processes,
+ parse_choice_from_env,
+ recursively_apply,
+ reduce,
+ release_memory,
+ save,
+ save_fsdp_model,
+ save_fsdp_optimizer,
+ shard_checkpoint,
+ wait_for_everyone,
+)
+from .utils.constants import FSDP_PYTORCH_VERSION
+from .utils.modeling import get_state_dict_offloaded_model
+from .utils.other import is_compiled_module
+
+
+if is_deepspeed_available():
+ from .utils import (
+ DeepSpeedEngineWrapper,
+ DeepSpeedOptimizerWrapper,
+ DeepSpeedSchedulerWrapper,
+ DummyOptim,
+ DummyScheduler,
+ )
+
+if is_fp8_available():
+ import transformer_engine.common.recipe as te_recipe
+ from transformer_engine.pytorch import fp8_autocast
+
+
+if is_megatron_lm_available():
+ from .utils import (
+ MegatronEngine,
+ MegatronLMDummyDataLoader,
+ MegatronLMDummyScheduler,
+ MegatronLMOptimizerWrapper,
+ MegatronLMSchedulerWrapper,
+ megatron_lm_initialize,
+ megatron_lm_prepare_data_loader,
+ megatron_lm_prepare_model,
+ megatron_lm_prepare_optimizer,
+ megatron_lm_prepare_scheduler,
+ )
+
+from torch.distributed.algorithms.join import Join
+
+
+if is_torch_xla_available():
+ import torch_xla.amp as xamp
+ import torch_xla.core.xla_model as xm
+ import torch_xla.distributed.xla_multiprocessing as xmp
+
+
+if is_npu_available(check_device=False):
+ import torch_npu # noqa: F401
+
+
+try:
+ from torch.optim.lr_scheduler import LRScheduler
+except ImportError:
+ from torch.optim.lr_scheduler import _LRScheduler as LRScheduler
+
+logger = get_logger(__name__)
+
+# Sentinel values for defaults
+_split_batches = object()
+_dispatch_batches = object()
+_even_batches = object()
+_use_seedable_sampler = object()
+
+
+class Accelerator:
+ """
+ Creates an instance of an accelerator for distributed training (on multi-GPU, TPU) or mixed precision training.
+
+ Args:
+ device_placement (`bool`, *optional*, defaults to `True`):
+ Whether or not the accelerator should put objects on device (tensors yielded by the dataloader, model,
+ etc...).
+ mixed_precision (`str`, *optional*):
+ Whether or not to use mixed precision training. Choose from 'no', 'fp16', 'bf16', or 'fp8'. Will default to the
+ value in the environment variable `ACCELERATE_MIXED_PRECISION`, which will use the default value in the
+ accelerate config of the current system or the flag passed with the `accelerate.launch` command. 'fp8'
+ requires the installation of transformers-engine.
+ gradient_accumulation_steps (`int`, *optional*, defaults to 1):
+ The number of steps that should pass before gradients are accumulated. A number > 1 should be combined with
+ `Accelerator.accumulate`. If not passed, will default to the value in the environment variable
+ `ACCELERATE_GRADIENT_ACCUMULATION_STEPS`. Can also be configured through a `GradientAccumulationPlugin`.
+ cpu (`bool`, *optional*):
+ Whether or not to force the script to execute on CPU. Will ignore any available GPU if set to `True` and force
+ the execution on one process only.
+ dataloader_config (`DataLoaderConfiguration`, *optional*):
+ A configuration for how the dataloaders should be handled in distributed scenarios.
+ deepspeed_plugin ([`~utils.DeepSpeedPlugin`], *optional*):
+ Tweak your DeepSpeed related args using this argument. This argument is optional and can be configured
+ directly using *accelerate config*
+ fsdp_plugin ([`~utils.FullyShardedDataParallelPlugin`], *optional*):
+ Tweak your FSDP related args using this argument. This argument is optional and can be configured directly
+ using *accelerate config*
+ megatron_lm_plugin ([`~utils.MegatronLMPlugin`], *optional*):
+ Tweak your MegatronLM related args using this argument. This argument is optional and can be configured
+ directly using *accelerate config*
+ rng_types (list of `str` or [`~utils.RNGType`]):
+ The list of random number generators to synchronize at the beginning of each iteration in your prepared
+ dataloaders. Should be one or several of:
+
+ - `"torch"`: the base torch random number generator
+ - `"cuda"`: the CUDA random number generator (GPU only)
+ - `"xla"`: the XLA random number generator (TPU only)
+ - `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your
+ dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.
+
+ Will default to `["torch"]` for PyTorch versions <=1.5.1 and `["generator"]` for PyTorch versions >= 1.6.
+ log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*):
+ A list of loggers to be setup for experiment tracking. Should be one or several of:
+
+ - `"all"`
+ - `"tensorboard"`
+ - `"wandb"`
+ - `"comet_ml"`
+ If `"all"` is selected, will pick up all available trackers in the environment and initialize them. Can
+ also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `"all"`.
+ project_config ([`~utils.ProjectConfiguration`], *optional*):
+ A configuration for how saving the state can be handled.
+ project_dir (`str`, `os.PathLike`, *optional*):
+ A path to a directory for storing data such as logs of locally-compatible loggers and potentially saved
+ checkpoints.
+ step_scheduler_with_optimizer (`bool`, *optional*, defaults to `True`):
+ Set `True` if the learning rate scheduler is stepped at the same time as the optimizer, `False` if only
+ done under certain circumstances (at the end of each epoch, for instance).
+ kwargs_handlers (list of [`~utils.KwargsHandler`], *optional*):
+ A list of [`~utils.KwargsHandler`] to customize how the objects related to distributed training or mixed
+ precision are created. See [kwargs](kwargs) for more information.
+ dynamo_backend (`str` or [`~utils.DynamoBackend`], *optional*, defaults to `"no"`):
+ Set to one of the possible dynamo backends to optimize your training with torch dynamo.
+ gradient_accumulation_plugin ([`~utils.GradientAccumulationPlugin`], *optional*):
+ A configuration for how gradient accumulation should be handled, if more tweaking than just the
+ `gradient_accumulation_steps` is needed.
+
+ **Available attributes:**
+
+ - **device** (`torch.device`) -- The device to use.
+ - **distributed_type** ([`~utils.DistributedType`]) -- The distributed training configuration.
+ - **local_process_index** (`int`) -- The process index on the current machine.
+ - **mixed_precision** (`str`) -- The configured mixed precision mode.
+ - **num_processes** (`int`) -- The total number of processes used for training.
+ - **optimizer_step_was_skipped** (`bool`) -- Whether or not the optimizer update was skipped (because of
+ gradient overflow in mixed precision), in which
+ case the learning rate should not be changed.
+ - **process_index** (`int`) -- The overall index of the current process among all processes.
+ - **state** ([`~state.AcceleratorState`]) -- The distributed setup state.
+ - **sync_gradients** (`bool`) -- Whether the gradients are currently being synced across all processes.
+ - **use_distributed** (`bool`) -- Whether the current configuration is for distributed training.
+ """
+
+ def __init__(
+ self,
+ device_placement: bool = True,
+ split_batches: bool = _split_batches,
+ mixed_precision: PrecisionType | str | None = None,
+ gradient_accumulation_steps: int = 1,
+ cpu: bool = False,
+ dataloader_config: DataLoaderConfiguration | None = None,
+ deepspeed_plugin: DeepSpeedPlugin | None = None,
+ fsdp_plugin: FullyShardedDataParallelPlugin | None = None,
+ megatron_lm_plugin: MegatronLMPlugin | None = None,
+ rng_types: list[str | RNGType] | None = None,
+ log_with: str | LoggerType | GeneralTracker | list[str | LoggerType | GeneralTracker] | None = None,
+ project_dir: str | os.PathLike | None = None,
+ project_config: ProjectConfiguration | None = None,
+ gradient_accumulation_plugin: GradientAccumulationPlugin | None = None,
+ dispatch_batches: bool | None = _dispatch_batches,
+ even_batches: bool = _even_batches,
+ use_seedable_sampler: bool = _use_seedable_sampler,
+ step_scheduler_with_optimizer: bool = True,
+ kwargs_handlers: list[KwargsHandler] | None = None,
+ dynamo_backend: DynamoBackend | str | None = None,
+ ):
+ self.trackers = []
+ if project_config is not None:
+ self.project_configuration = project_config
+ else:
+ self.project_configuration = ProjectConfiguration(project_dir=project_dir)
+ if project_dir is not None and self.project_dir is None:
+ self.project_configuration.set_directories(project_dir)
+ if mixed_precision is not None:
+ mixed_precision = str(mixed_precision)
+ if mixed_precision not in PrecisionType:
+ raise ValueError(
+ f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}"
+ )
+
+ dynamo_plugin = TorchDynamoPlugin() if dynamo_backend is None else TorchDynamoPlugin(backend=dynamo_backend)
+
+ if deepspeed_plugin is None: # init from env variables
+ deepspeed_plugin = (
+ DeepSpeedPlugin() if os.environ.get("ACCELERATE_USE_DEEPSPEED", "false") == "true" else None
+ )
+ else:
+ assert isinstance(
+ deepspeed_plugin, DeepSpeedPlugin
+ ), "`deepspeed_plugin` must be an `accelerate.utils.DeepSpeedPlugin` object."
+ os.environ["ACCELERATE_USE_DEEPSPEED"] = "true" # use DeepSpeed if plugin is provided
+ if deepspeed_plugin:
+ if not is_deepspeed_available():
+ raise ImportError("DeepSpeed is not installed => run `pip install deepspeed` or build it from source.")
+ if is_mlu_available():
+ if compare_versions("deepspeed-mlu", "<", "0.10.1"):
+ raise ImportError("DeepSpeed MLU version must be >= 0.10.1. Please update DeepSpeed MLU.")
+ elif compare_versions("deepspeed", "<", "0.9.3"):
+ raise ImportError("DeepSpeed version must be >= 0.9.3. Please update DeepSpeed.")
+
+ mixed_precision = (
+ os.environ.get("ACCELERATE_MIXED_PRECISION", "no") if mixed_precision is None else mixed_precision
+ )
+ deepspeed_plugin.set_mixed_precision(mixed_precision)
+ deepspeed_plugin.set_deepspeed_weakref()
+
+ if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true" or isinstance(
+ fsdp_plugin, FullyShardedDataParallelPlugin
+ ):
+ if is_torch_version("<", FSDP_PYTORCH_VERSION):
+ raise ValueError(f"FSDP requires PyTorch >= {FSDP_PYTORCH_VERSION}")
+
+ if fsdp_plugin is None: # init from env variables
+ fsdp_plugin = (
+ FullyShardedDataParallelPlugin() if os.environ.get("ACCELERATE_USE_FSDP", "false") == "true" else None
+ )
+ else:
+ if not isinstance(fsdp_plugin, FullyShardedDataParallelPlugin):
+ raise TypeError("`fsdp_plugin` must be a FullyShardedDataParallelPlugin object.")
+ os.environ["ACCELERATE_USE_FSDP"] = "true" # use FSDP if plugin is provided
+
+ if megatron_lm_plugin is None: # init from env variables
+ megatron_lm_plugin = (
+ MegatronLMPlugin() if os.environ.get("ACCELERATE_USE_MEGATRON_LM", "false") == "true" else None
+ )
+ else:
+ if not isinstance(megatron_lm_plugin, MegatronLMPlugin):
+ raise TypeError("`megatron_lm_plugin` must be a MegatronLMPlugin object.")
+ os.environ["ACCELERATE_USE_MEGATRON_LM"] = "true" # use MegatronLM if plugin is provided
+
+ if megatron_lm_plugin:
+ if not is_megatron_lm_available():
+ raise ImportError("Megatron is not installed. please build it from source.")
+
+ # Kwargs handlers
+ self.ddp_handler = None
+ self.scaler_handler = None
+ self.init_handler = None
+ self.fp8_recipe_handler = None
+ self.autocast_handler = None
+ if kwargs_handlers is not None:
+ for handler in kwargs_handlers:
+ assert isinstance(
+ handler, KwargsHandler
+ ), f"Unsupported kwargs handler passed: {handler}, must be one that inherits `accelerate.utils.KwargsHandler`."
+ if isinstance(handler, DistributedDataParallelKwargs):
+ if self.ddp_handler is not None:
+ raise ValueError("You can only pass one `DistributedDataParallelKwargs` in `kwargs_handler`.")
+ else:
+ self.ddp_handler = handler
+ elif isinstance(handler, GradScalerKwargs):
+ if self.scaler_handler is not None:
+ raise ValueError("You can only pass one `GradScalerKwargs` in `kwargs_handler`.")
+ else:
+ self.scaler_handler = handler
+ elif isinstance(handler, InitProcessGroupKwargs):
+ if self.init_handler is not None:
+ raise ValueError("You can only pass one `InitProcessGroupKwargs` in `kwargs_handler`.")
+ else:
+ self.init_handler = handler
+ elif isinstance(handler, FP8RecipeKwargs):
+ if self.fp8_recipe_handler is not None:
+ raise ValueError("You can only pass one `FP8RecipeKwargs` in `kwargs_handler`.")
+ else:
+ self.fp8_recipe_handler = handler
+ elif isinstance(handler, AutocastKwargs):
+ if self.autocast_handler is not None:
+ raise ValueError("You can only pass one `AutocastKwargs` in `kwargs_handler`.")
+ else:
+ self.autocast_handler = handler
+
+ kwargs = self.init_handler.to_kwargs() if self.init_handler is not None else {}
+ self.state = AcceleratorState(
+ mixed_precision=mixed_precision,
+ cpu=cpu,
+ dynamo_plugin=dynamo_plugin,
+ deepspeed_plugin=deepspeed_plugin,
+ fsdp_plugin=fsdp_plugin,
+ megatron_lm_plugin=megatron_lm_plugin,
+ _from_accelerator=True,
+ **kwargs,
+ )
+
+ if self.fp8_recipe_handler is None and self.state.mixed_precision == "fp8":
+ self.fp8_recipe_handler = FP8RecipeKwargs(backend="MSAMP" if is_msamp_available() else "TE")
+
+ trackers = filter_trackers(log_with, self.logging_dir)
+ if len(trackers) < 1 and log_with is not None:
+ warnings.warn(f"`log_with={log_with}` was passed but no supported trackers are currently installed.")
+ self.log_with = trackers
+
+ if (
+ (mixed_precision != "bf16")
+ and getattr(self.state, "downcast_bfloat", False)
+ and (self.state.distributed_type != DistributedType.XLA)
+ ):
+ raise ValueError("Can only use `downcast_bf16` when using `mixed_precision='bf16'` and on a TPU")
+
+ if gradient_accumulation_plugin is not None:
+ if gradient_accumulation_steps != 1:
+ raise ValueError(
+ "You can only pass one of `gradient_accumulation_steps` and `gradient_accumulation_plugin`. Please only pass in the created `GradientAccumulationPlugin` object."
+ )
+ else:
+ gradient_accumulation_steps = int(
+ parse_choice_from_env("ACCELERATE_GRADIENT_ACCUMULATION_STEPS", gradient_accumulation_steps)
+ )
+ gradient_accumulation_plugin = GradientAccumulationPlugin(num_steps=gradient_accumulation_steps)
+ self.gradient_state = GradientState(
+ gradient_accumulation_plugin=gradient_accumulation_plugin,
+ )
+
+ self.device_placement = device_placement
+ if dataloader_config is None:
+ dataloader_config = DataLoaderConfiguration()
+ self.dataloader_config = dataloader_config
+ # Deal with deprecated args
+ # TODO: Remove in v1.0.0
+ deprecated_dl_args = {}
+ if dispatch_batches is not _dispatch_batches:
+ deprecated_dl_args["dispatch_batches"] = dispatch_batches
+ self.dataloader_config.dispatch_batches = dispatch_batches
+ if split_batches is not _split_batches:
+ deprecated_dl_args["split_batches"] = split_batches
+ self.dataloader_config.split_batches = split_batches
+ if even_batches is not _even_batches:
+ deprecated_dl_args["even_batches"] = even_batches
+ self.dataloader_config.even_batches = even_batches
+ if use_seedable_sampler is not _use_seedable_sampler:
+ deprecated_dl_args["use_seedable_sampler"] = use_seedable_sampler
+ self.dataloader_config.use_seedable_sampler = use_seedable_sampler
+ if len(deprecated_dl_args) > 0:
+ values = ", ".join([f"{k}={v}" for k, v in deprecated_dl_args.items()])
+ warnings.warn(
+ f"Passing the following arguments to `Accelerator` is deprecated and will be removed in version 1.0 of Accelerate: {deprecated_dl_args.keys()}. "
+ "Please pass an `accelerate.DataLoaderConfiguration` instead: \n"
+ f"dataloader_config = DataLoaderConfiguration({values})",
+ FutureWarning,
+ )
+ self.step_scheduler_with_optimizer = step_scheduler_with_optimizer
+
+ # Mixed precision attributes
+ self.scaler = None
+ self.native_amp = False
+ if (
+ self.state.mixed_precision == "fp16"
+ and self.device.type != "cpu"
+ and self.distributed_type not in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM)
+ ):
+ self.native_amp = True
+ if self.device.type not in ("xpu", "cuda", "mps", "npu", "xla", "mlu") or is_torch_xla_available(
+ check_is_tpu=True
+ ):
+ raise ValueError(f"fp16 mixed precision requires a GPU (not {self.device.type!r}).")
+ kwargs = self.scaler_handler.to_kwargs() if self.scaler_handler is not None else {}
+ if self.distributed_type == DistributedType.FSDP:
+ from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
+
+ self.scaler = ShardedGradScaler(**kwargs)
+ elif is_torch_xla_available(check_is_gpu=True):
+ self.scaler = xamp.GradScaler(**kwargs)
+ elif is_mlu_available():
+ self.scaler = torch.mlu.amp.GradScaler(**kwargs)
+ elif is_npu_available():
+ self.scaler = torch.npu.amp.GradScaler(**kwargs)
+ else:
+ self.scaler = torch.cuda.amp.GradScaler(**kwargs)
+
+ elif self.state.mixed_precision == "bf16" and self.distributed_type not in (
+ DistributedType.DEEPSPEED,
+ DistributedType.MEGATRON_LM,
+ ):
+ if self.device.type in ["cpu", "xpu"]:
+ self.native_amp = True
+ else:
+ self.native_amp = is_bf16_available(True)
+ if mixed_precision == "bf16" and not self.native_amp and not is_torch_xla_available():
+ raise ValueError("bf16 mixed precision requires PyTorch >= 1.10 and a supported device.")
+
+ # Start of internal step tracking
+ self.step = 0
+
+ # Internal references to the training objects
+ self._optimizers = []
+ self._models = []
+ self._schedulers = []
+ self._dataloaders = []
+ self._custom_objects = []
+
+ # Hooks
+ self._load_model_state_pre_hook = OrderedDict()
+ self._save_model_state_pre_hook = OrderedDict()
+
+ # RNG Types
+ self.rng_types = rng_types
+ if self.rng_types is None:
+ self.rng_types = ["generator"]
+
+ # Set a flag tensor for early stopping and other breakpoints
+ self.flag_tensor = None
+
+ check_os_kernel()
+
+ @property
+ def use_distributed(self):
+ """
+ Whether the Accelerator is configured for distributed training
+ """
+ return self.state.use_distributed
+
+ @property
+ def distributed_type(self):
+ return self.state.distributed_type
+
+ @property
+ def num_processes(self):
+ return self.state.num_processes
+
+ @property
+ def process_index(self):
+ return self.state.process_index
+
+ @property
+ def local_process_index(self):
+ return self.state.local_process_index
+
+ @property
+ def device(self):
+ return self.state.device
+
+ @property
+ def split_batches(self):
+ return self.dataloader_config.split_batches
+
+ @property
+ def dispatch_batches(self):
+ return self.dataloader_config.dispatch_batches
+
+ @property
+ def even_batches(self):
+ return self.dataloader_config.even_batches
+
+ @even_batches.setter
+ def even_batches(self, value: bool):
+ self.dataloader_config.even_batches = value
+
+ @property
+ def use_seedable_sampler(self):
+ return self.dataloader_config.use_seedable_sampler
+
+ @property
+ def project_dir(self):
+ return self.project_configuration.project_dir
+
+ @property
+ def logging_dir(self):
+ return self.project_configuration.logging_dir
+
+ @property
+ def save_iteration(self):
+ return self.project_configuration.iteration
+
+ @property
+ def is_main_process(self):
+ """True for one process only."""
+ return self.state.is_main_process
+
+ @property
+ def is_local_main_process(self):
+ """True for one process per server."""
+ return self.state.is_local_main_process
+
+ @property
+ def use_fp16(self):
+ warnings.warn(
+ "The `use_fp16` property is deprecated and will be removed in version 1.0 of Accelerate use "
+ "`Accelerator.mixed_precision == 'fp16'` instead.",
+ FutureWarning,
+ )
+ return self.mixed_precision != "no"
+
+ @property
+ def is_last_process(self):
+ return self.process_index == self.num_processes - 1
+
+ @property
+ def mixed_precision(self):
+ return self.state.mixed_precision
+
+ @contextmanager
+ def split_between_processes(self, inputs: list | tuple | dict | torch.Tensor, apply_padding: bool = False):
+ """
+ Splits `inputs` between `self.num_processes` quickly so the result can then be used on that process. Useful when doing
+ distributed inference, such as with different prompts.
+
+ Note that when using a `dict`, all keys need to have the same number of elements.
+
+ Args:
+ inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`):
+ The input to split between processes.
+ apply_padding (`bool`, `optional`, defaults to `False`):
+ Whether to apply padding by repeating the last element of the input so that all processes have the same
+ number of elements. Useful when trying to perform actions such as `Accelerator.gather()` on the outputs
+ or passing in fewer inputs than there are processes. If so, just remember to drop the padded elements
+ afterwards.
+
+ Example:
+
+ ```python
+ # Assume there are two processes
+ from accelerate import Accelerator
+
+ accelerator = Accelerator()
+ with accelerator.split_between_processes(["A", "B", "C"]) as inputs:
+ print(inputs)
+ # Process 0
+ ["A", "B"]
+ # Process 1
+ ["C"]
+
+ with accelerator.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs:
+ print(inputs)
+ # Process 0
+ ["A", "B"]
+ # Process 1
+ ["C", "C"]
+ ```
+ """
+ with PartialState().split_between_processes(inputs, apply_padding=apply_padding) as inputs:
+ yield inputs
+
+ def on_main_process(self, function: Callable[..., Any] = None):
+ """
+ A decorator that will run the decorated function on the main process only. Can also be called using the
+ `PartialState` class.
+
+ Args:
+ function (`Callable`): The function to decorate.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+
+
+ >>> @accelerator.on_main_process
+ ... def print_something():
+ ... print("This will be printed by process 0 only.")
+
+
+ >>> print_something()
+ "This will be printed by process 0 only"
+ ```
+ """
+ # For times when the `Accelerator` object itself utilizes this decorator.
+ if function is None:
+ if "Accelerator." in self.__qualname__:
+ function = self
+ else:
+ raise ValueError(
+ "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object."
+ )
+
+ def _inner(*args, **kwargs):
+ return PartialState().on_main_process(function)(*args, **kwargs)
+
+ return _inner
+
+ def on_local_main_process(self, function: Callable[..., Any] = None):
+ """
+ A decorator that will run the decorated function on the local main process only. Can also be called using the
+ `PartialState` class.
+
+ Args:
+ function (`Callable`): The function to decorate.
+
+ Example:
+ ```python
+ # Assume we have 2 servers with 4 processes each.
+ from accelerate import Accelerator
+
+ accelerator = Accelerator()
+
+
+ @accelerator.on_local_main_process
+ def print_something():
+ print("This will be printed by process 0 only on each server.")
+
+
+ print_something()
+ # On server 1:
+ "This will be printed by process 0 only"
+ # On server 2:
+ "This will be printed by process 0 only"
+ ```
+ """
+ # For times when the `Accelerator` object itself utilizes this decorator.
+ if function is None:
+ if "Accelerator." in self.__qualname__:
+ function = self
+ else:
+ raise ValueError(
+ "The `on_local_main_process` decorator must be called with a function on an instantiated `Accelerator` object."
+ )
+
+ def _inner(*args, **kwargs):
+ return PartialState().on_local_main_process(function)(*args, **kwargs)
+
+ return _inner
+
+ def on_last_process(self, function: Callable[..., Any]):
+ """
+ A decorator that will run the decorated function on the last process only. Can also be called using the
+ `PartialState` class.
+
+ Args:
+ function (`Callable`): The function to decorate.
+
+ Example:
+ ```python
+ # Assume we have 4 processes.
+ from accelerate import Accelerator
+
+ accelerator = Accelerator()
+
+
+ @accelerator.on_last_process
+ def print_something():
+ print(f"Printed on process {accelerator.process_index}")
+
+
+ print_something()
+ "Printed on process 3"
+ ```
+ """
+ # For times when the `Accelerator` object itself utilizes this decorator.
+ if function is None:
+ if "Accelerator." in self.__qualname__:
+ function = self
+ else:
+ raise ValueError(
+ "The `on_last_process` decorator must be called with a function on an instantiated `Accelerator` object."
+ )
+
+ def _inner(*args, **kwargs):
+ return PartialState().on_last_process(function)(*args, **kwargs)
+
+ return _inner
+
+ def on_process(self, function: Callable[..., Any] = None, process_index: int = None):
+ """
+ A decorator that will run the decorated function on a given process index only. Can also be called using the
+ `PartialState` class.
+
+ Args:
+ function (`Callable`, `optional`):
+ The function to decorate.
+ process_index (`int`, `optional`):
+ The index of the process on which to run the function.
+
+ Example:
+ ```python
+ # Assume we have 4 processes.
+ from accelerate import Accelerator
+
+ accelerator = Accelerator()
+
+
+ @accelerator.on_process(process_index=2)
+ def print_something():
+ print(f"Printed on process {accelerator.process_index}")
+
+
+ print_something()
+ "Printed on process 2"
+ ```
+ """
+ # Initial construction of the decorator.
+ if (self is not None) and (process_index is not None) and (function is None):
+ return partial(self.on_process, process_index=process_index)
+ # For times when the `Accelerator` object itself utilizes this decorator.
+ if function is None:
+ if "Accelerator." in self.__qualname__:
+ function = self
+ else:
+ raise ValueError(
+ "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object."
+ )
+
+ def _inner(*args, **kwargs):
+ return PartialState().on_process(function, process_index)(*args, **kwargs)
+
+ return _inner
+
+ def on_local_process(self, function: Callable[..., Any] = None, local_process_index: int = None):
+ """
+ A decorator that will run the decorated function on a given local process index only. Can also be called using
+ the `PartialState` class.
+
+ Args:
+ function (`Callable`, *optional*):
+ The function to decorate.
+ local_process_index (`int`, *optional*):
+ The index of the local process on which to run the function.
+
+ Example:
+ ```python
+ # Assume we have 2 servers with 4 processes each.
+ from accelerate import Accelerator
+
+ accelerator = Accelerator()
+
+
+ @accelerator.on_local_process(local_process_index=2)
+ def print_something():
+ print(f"Printed on process {accelerator.local_process_index}")
+
+
+ print_something()
+ # On server 1:
+ "Printed on process 2"
+ # On server 2:
+ "Printed on process 2"
+ ```
+ """
+ # Initial construction of the decorator.
+ if (self is not None) and (local_process_index is not None) and (function is None):
+ return partial(self.on_local_process, local_process_index=local_process_index)
+ # For times when the `Accelerator` object itself utilizes this decorator.
+ if function is None:
+ if "Accelerator." in self.__qualname__:
+ function = self
+ else:
+ raise ValueError(
+ "The `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object."
+ )
+
+ def _inner(*args, **kwargs):
+ return PartialState().on_local_process(function, local_process_index)(*args, **kwargs)
+
+ return _inner
+
+ @contextmanager
+ def main_process_first(self):
+ """
+ Lets the main process go first inside a with block.
+
+ The other processes will enter the with block after the main process exits.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> with accelerator.main_process_first():
+ ... # This will be printed first by process 0 then in a seemingly
+ ... # random order by the other processes.
+ ... print(f"This will be printed by process {accelerator.process_index}")
+ ```
+ """
+ with self.state.main_process_first():
+ yield
+
+ @contextmanager
+ def local_main_process_first(self):
+ """
+ Lets the local main process go first inside a with block.
+
+ The other processes will enter the with block after the main process exits.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> with accelerator.local_main_process_first():
+ ... # This will be printed first by local process 0 then in a seemingly
+ ... # random order by the other processes.
+ ... print(f"This will be printed by process {accelerator.local_process_index}")
+ ```
+ """
+ with self.state.local_main_process_first():
+ yield
+
+ @contextmanager
+ def no_sync(self, model):
+ """
+ A context manager to disable gradient synchronizations across DDP processes by calling
+ `torch.nn.parallel.DistributedDataParallel.no_sync`.
+
+ If `model` is not wrapped in DDP, this context manager does nothing.
+
+ Args:
+ model (`torch.nn.Module`):
+ PyTorch Module that was prepared with `Accelerator.prepare`
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> dataloader, model, optimizer = accelerator.prepare(dataloader, model, optimizer)
+ >>> input_a = next(iter(dataloader))
+ >>> input_b = next(iter(dataloader))
+
+ >>> with accelerator.no_sync(model):
+ ... outputs = model(input_a)
+ ... loss = loss_func(outputs)
+ ... accelerator.backward(loss)
+ ... # No synchronization across processes, only accumulate gradients
+ >>> outputs = model(input_b)
+ >>> accelerator.backward(loss)
+ >>> # Synchronization across all processes
+ >>> optimizer.step()
+ >>> optimizer.zero_grad()
+ ```
+ """
+ context = contextlib.nullcontext
+ if self.use_distributed:
+ context = getattr(model, "no_sync", context)
+
+ with context():
+ yield
+
+ @staticmethod
+ @contextmanager
+ def trigger_sync_in_backward(model):
+ """Trigger the sync of the gradients in the next backward pass of the model after multiple forward passes under
+ `Accelerator.no_sync` (only applicable in multi-GPU scenarios).
+
+ If the script is not launched in distributed mode, this context manager does nothing.
+
+ Args:
+ model (`torch.nn.Module`):
+ The model for which to trigger the gradient synchronization.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> dataloader, model, optimizer = accelerator.prepare(dataloader, model, optimizer)
+
+ >>> with accelerator.no_sync():
+ ... loss_a = loss_func(model(input_a)) # first forward pass
+ ... loss_b = loss_func(model(input_b)) # second forward pass
+ >>> accelerator.backward(loss_a) # No synchronization across processes, only accumulate gradients
+ >>> with accelerator.trigger_sync_in_backward(model):
+ ... accelerator.backward(loss_b) # Synchronization across all processes
+ >>> optimizer.step()
+ >>> optimizer.zero_grad()
+ ```
+ """
+ if not isinstance(model, torch.nn.parallel.DistributedDataParallel):
+ yield
+ return
+
+ old_require_backward_grad_sync = model.require_backward_grad_sync
+ old_require_forward_param_sync = model.require_forward_param_sync
+
+ # EXPERIMENTAL: This will force grad sync during `backward()`, but it is unknown if it breaks other DDP features.
+ # https://github.com/pytorch/pytorch/blob/e1502c0cdbfd17548c612f25d5a65b1e4b86224d/torch/nn/parallel/distributed.py#L1453-L1466
+ model.require_backward_grad_sync = True
+ model.require_forward_param_sync = True
+ # https://github.com/pytorch/pytorch/blob/e1502c0cdbfd17548c612f25d5a65b1e4b86224d/torch/csrc/distributed/c10d/reducer.cpp#L1371-L1402
+ model.reducer.prepare_for_backward([])
+ try:
+ yield
+ finally:
+ model.require_backward_grad_sync = old_require_backward_grad_sync
+ model.require_forward_param_sync = old_require_forward_param_sync
+
+ def _do_sync(self, force: bool = False):
+ "Sets the right `sync_gradients` context and either resets or increases `self.step`"
+ if self.gradient_state.sync_with_dataloader and self.gradient_state.end_of_dataloader:
+ self.step = 0
+ self.gradient_state._set_sync_gradients(True)
+ else:
+ self.step += 1
+ self.gradient_state._set_sync_gradients(force or ((self.step % self.gradient_state.num_steps) == 0))
+
+ @property
+ def sync_gradients(self):
+ return self.gradient_state.sync_gradients
+
+ @sync_gradients.setter
+ def sync_gradients(self, sync_gradients):
+ self.gradient_state.sync_gradients = sync_gradients
+
+ @property
+ def gradient_accumulation_steps(self):
+ return self.gradient_state.num_steps
+
+ @gradient_accumulation_steps.setter
+ def gradient_accumulation_steps(self, gradient_accumulation_steps):
+ self.gradient_state.plugin_kwargs.update({"num_steps": gradient_accumulation_steps})
+
+ @contextmanager
+ def accumulate(self, *models):
+ """
+ A context manager that will lightly wrap around and perform gradient accumulation automatically
+
+ Args:
+ *models (list of `torch.nn.Module`):
+ PyTorch Modules that were prepared with `Accelerator.prepare`. Models passed to `accumulate()` will
+ skip gradient syncing during backward pass in distributed training
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator(gradient_accumulation_steps=1)
+ >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)
+
+ >>> for input, output in dataloader:
+ ... with accelerator.accumulate(model):
+ ... outputs = model(input)
+ ... loss = loss_func(outputs)
+ ... loss.backward()
+ ... optimizer.step()
+ ... scheduler.step()
+ ... optimizer.zero_grad()
+ ```
+ """
+ # sync_each_batch=True will guarantee below that self.sync_gradients=True, therefore
+ # resulting in the nullcontext always being selected.
+ self._do_sync(force=self.gradient_state.plugin_kwargs.get("sync_each_batch", False))
+ with contextlib.ExitStack() as cm_stack:
+ for m in models:
+ cm_stack.enter_context(contextlib.nullcontext() if self.sync_gradients else self.no_sync(m))
+ yield
+
+ @contextmanager
+ def join_uneven_inputs(self, joinables, even_batches=None):
+ """
+ A context manager that facilitates distributed training or evaluation on uneven inputs, which acts as a wrapper
+ around `torch.distributed.algorithms.join`. This is useful when the total batch size does not evenly divide the
+ length of the dataset.
+
+ Args:
+ joinables (`list[torch.distributed.algorithms.Joinable]`):
+ A list of models or optimizers that subclass `torch.distributed.algorithms.Joinable`. Most commonly, a
+ PyTorch Module that was prepared with `Accelerator.prepare` for DistributedDataParallel training.
+ even_batches (`bool`, *optional*):
+ If set, this will override the value of `even_batches` set in the `Accelerator`. If it is not provided,
+ the default `Accelerator` value will be used.
+
+ Note:
+ `join_uneven_inputs` is only supported for Distributed Data Parallel training on multiple GPUs. For any other
+ configuration, this method will have no effect.
+
+ Note:
+ Overriding `even_batches` will not affect iterable-style data loaders.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator(even_batches=True)
+ >>> ddp_model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
+
+ >>> with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
+ ... for input, output in dataloader:
+ ... outputs = model(input)
+ ... loss = loss_func(outputs)
+ ... loss.backward()
+ ... optimizer.step()
+ ... optimizer.zero_grad()
+ ```
+ """
+ if self.distributed_type in (
+ DistributedType.MULTI_GPU,
+ DistributedType.MULTI_NPU,
+ DistributedType.MULTI_MLU,
+ DistributedType.MULTI_XPU,
+ ):
+ dl_even_batches_values = []
+
+ if even_batches is not None:
+ iterable_dl_seen = False
+ # override value in batch sampler for map-style datasets
+ for dl_idx, dl in enumerate(self._dataloaders):
+ if isinstance(dl, DataLoaderDispatcher):
+ iterable_dl_seen = True
+ continue
+ dl_even_batches_values.append((dl_idx, dl.batch_sampler.even_batches))
+ dl.batch_sampler.even_batches = even_batches
+
+ if iterable_dl_seen:
+ warnings.warn(
+ "Overridding even_batches is only supported for map-style datasets, yet some dataloaders given were iterable"
+ )
+ else:
+ even_batches = self.even_batches
+
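+ # Joining is only needed when batches can be uneven, i.e. when `even_batches` is disabled.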
+ enable_join = False if even_batches else True
+ try:
+ with Join(joinables, enable=enable_join, throw_on_early_termination=False):
+ yield
+ finally:
+ # reset any batch samplers that have been modified
+ for dl_idx, even_batches_value in dl_even_batches_values:
+ self._dataloaders[dl_idx].batch_sampler.even_batches = even_batches_value
+ else:
+ # Even when disabled, Join expects models to subclass Joinable, so skip entirely for single process runs
+ if self.distributed_type != DistributedType.NO:
+ warnings.warn(
+ "Joining uneven inputs is only supported for multi-GPU training, as a result `join_uneven_inputs` will have no effect."
+ )
+
+ with contextlib.nullcontext(joinables):
+ yield
+
+ def print(self, *args, **kwargs):
+ """
+ Drop-in replacement of `print()` to only print once per server.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> accelerator.print("Hello world!")
+ ```
+ """
+ self.state.print(*args, **kwargs)
+
+ def _prepare_one(self, obj, first_pass=False, device_placement=None):
+ # First pass of preparation: DataLoader, model, optimizer
+ if first_pass:
+ if isinstance(obj, torch.utils.data.DataLoader):
+ return self.prepare_data_loader(obj, device_placement=device_placement)
+ elif isinstance(obj, torch.nn.Module):
+ return self.prepare_model(obj, device_placement=device_placement)
+ elif isinstance(obj, torch.optim.Optimizer):
+ optimizer = self.prepare_optimizer(obj, device_placement=device_placement)
+ return optimizer
+ # Second pass of preparation: LR scheduler (which needs the full list of optimizers)
+ elif isinstance(obj, LRScheduler):
+ scheduler = self.prepare_scheduler(obj)
+ return scheduler
+ # Return the unprocessed object if the previous criteria were not met
+ return obj
+
+ def prepare(self, *args, device_placement=None):
+ """
+ Prepare all objects passed in `args` for distributed training and mixed precision, then return them in the same
+ order.
+
+ Args:
+ *args (list of objects):
+ Any of the following types of objects:
+
+ - `torch.utils.data.DataLoader`: PyTorch Dataloader
+ - `torch.nn.Module`: PyTorch Module
+ - `torch.optim.Optimizer`: PyTorch Optimizer
+ - `torch.optim.lr_scheduler.LRScheduler`: PyTorch LR Scheduler
+
+ device_placement (`list[bool]`, *optional*):
+ Used to customize whether automatic device placement should be performed for each object passed. Needs
+ to be a list of the same length as `args`. Not compatible with DeepSpeed or FSDP.
+
+
+ Note:
+ You don't need to prepare a model if you only use it for inference without any kind of mixed precision.
+
+ Examples:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> # Assume a model, optimizer, data_loader and scheduler are defined
+ >>> model, optimizer, data_loader, scheduler = accelerator.prepare(model, optimizer, data_loader, scheduler)
+ ```
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> # Assume a model, optimizer, data_loader and scheduler are defined
+ >>> device_placement = [True, True, False, False]
+ >>> # Will place the first two items passed in automatically to the right device but not the last two.
+ >>> model, optimizer, data_loader, scheduler = accelerator.prepare(
+ ... model, optimizer, data_loader, scheduler, device_placement=device_placement
+ ... )
+ ```
+ """
+ if device_placement is None:
+ device_placement = [None for _ in args]
+ elif self.distributed_type in (DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM):
+ raise ValueError("You can't customize device placements with DeepSpeed or Megatron-LM.")
+ elif len(device_placement) != len(args):
+ raise ValueError(
+ f"`device_placement` should be a list with {len(args)} elements (the number of objects passed)."
+ )
+
+ for obj in args:
+ # TODO: Look at enabling native TP training directly with a proper config
+ if (
+ isinstance(obj, torch.nn.Module)
+ and self.verify_device_map(obj)
+ and self.distributed_type != DistributedType.NO
+ and os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true"
+ ):
+ raise ValueError(
+ "You can't train a model that has been loaded with `device_map='auto'` in any distributed mode."
+ " Please rerun your script specifying `--num_processes=1` or by launching with `python {{myscript.py}}`."
+ )
+
+ if self.distributed_type == DistributedType.DEEPSPEED:
+ model_count = 0
+ for obj in args:
+ if isinstance(obj, torch.nn.Module):
+ model_count += 1
+ if model_count > 1:
+ raise AssertionError(
+ "You can't use same `Accelerator()` instance with multiple models when using DeepSpeed"
+ )
+
+ # On TPUs, putting the model on the XLA device will create new parameters, so the corresponding optimizer will
+ # have parameters disconnected from the model (so no training :-( ).
+ # If the model and optimizer have parameters on different devices we raise an error.
+ if self.distributed_type == DistributedType.XLA:
+ model_device, optimizer_device = self._get_devices()
+ if model_device is not None and optimizer_device is not None and model_device != optimizer_device:
+ raise ValueError(
+ "The model and the optimizer parameters are not on the same device, which probably means you "
+ "created an optimizer around your model **before** putting on the device. Make sure the line "
+ "model.to(device) is before the optimizer creation in your script or remove it entirely and use "
+ "the flag default value for `device_placement` in your `Accelerator` to let it handle that "
+ "part for you."
+ )
+
+ # When device placement is handled on XLA, or TE fp8 is used, model parameters may be re-created during
+ # preparation, so we fix up the optimizer afterwards by:
+ tpu_should_fix_optimizer = self.device_placement and self.distributed_type == DistributedType.XLA
+ if tpu_should_fix_optimizer or (self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "TE"):
+ # 1. grabbing old model parameters
+ old_named_params = self._get_named_parameters(*args)
+
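+ # Dispatch on the distributed backend: IPEX/XPU optimization for CPU/XPU setups, dedicated paths for
+ # DeepSpeed and Megatron-LM, and otherwise the generic two-pass `_prepare_one` flow (optionally with MS-AMP fp8).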
+ if self.distributed_type in [DistributedType.MULTI_CPU, DistributedType.MULTI_XPU, DistributedType.NO]:
+ if self.device.type == "cpu" and self.state.use_ipex:
+ args = self._prepare_ipex(*args)
+ elif self.device.type == "xpu" and is_xpu_available():
+ args = self._prepare_ipex(*args)
+ if self.distributed_type == DistributedType.DEEPSPEED:
+ result = self._prepare_deepspeed(*args)
+ elif self.distributed_type == DistributedType.MEGATRON_LM:
+ result = self._prepare_megatron_lm(*args)
+ else:
+ if self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "MSAMP":
+ args = self._prepare_msamp(*args)
+ # MS-AMP will handle the device placement
+ device_placement = [False for _ in args]
+ result = tuple(
+ self._prepare_one(obj, first_pass=True, device_placement=d) for obj, d in zip(args, device_placement)
+ )
+ result = tuple(self._prepare_one(obj, device_placement=d) for obj, d in zip(result, device_placement))
+
+ if tpu_should_fix_optimizer or (self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "TE"):
+ # 2. grabbing new model parameters
+ new_named_params = self._get_named_parameters(*result)
+ # 3. building a map from the first to the second
+ mapping = {p: new_named_params[n] for n, p in old_named_params.items()}
+ # 4. using that map to update the parameters of the optimizer
+ for obj in result:
+ if isinstance(obj, torch.optim.Optimizer):
+ obj._switch_parameters(mapping)
+
+ for item in result:
+ if any(
+ item in container
+ for container in (self._dataloaders, self._models, self._optimizers, self._schedulers)
+ ):
+ item._is_accelerate_prepared = True
+
+ return result if len(result) > 1 else result[0]
+
+ def prepare_model(self, model: torch.nn.Module, device_placement: bool = None, evaluation_mode: bool = False):
+ """
+ Prepares a PyTorch model for training in any distributed setup. It is recommended to use
+ [`Accelerator.prepare`] instead.
+
+ Args:
+ model (`torch.nn.Module`):
+ A PyTorch model to prepare. You don't need to prepare a model if it is used only for inference without
+ any kind of mixed precision
+ device_placement (`bool`, *optional*):
+ Whether or not to place the model on the proper device. Will default to `self.device_placement`.
+ evaluation_mode (`bool`, *optional*, defaults to `False`):
+ Whether or not to set the model for evaluation only, by just applying mixed precision and
+ `torch.compile` (if configured in the `Accelerator` object).
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> # Assume a model is defined
+ >>> model = accelerator.prepare_model(model)
+ ```
+ """
+ if device_placement is None:
+ device_placement = self.device_placement and self.distributed_type != DistributedType.FSDP
+ self._models.append(model)
+
+ # TODO: Look at enabling native TP training directly with a proper config
+ if (
+ self.verify_device_map(model)
+ and self.distributed_type != DistributedType.NO
+ and os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true"
+ ):
+ raise ValueError(
+ "You can't train a model that has been loaded with `device_map='auto'` in any distributed mode."
+ " Please rerun your script specifying `--num_processes=1` or by launching with `python {{myscript.py}}`."
+ )
+
+ if self.native_amp:
+ model._original_forward = model.forward
+ model_forward_func = model.forward.__func__ if hasattr(model.forward, "__func__") else model.forward
+ autocast_context = get_mixed_precision_context_manager(self.native_amp, self.autocast_handler)
+ new_forward = autocast_context(model_forward_func)
+ if hasattr(model.forward, "__func__"):
+ model.forward = MethodType(new_forward, model)
+ model.forward = MethodType(convert_outputs_to_fp32(model.forward.__func__), model)
+ else:
+ model.forward = convert_outputs_to_fp32(new_forward)
+ elif self.mixed_precision == "fp8" and self.fp8_recipe_handler.backend == "TE":
+ if not has_transformer_engine_layers(model):
+ with torch.no_grad():
+ convert_model(model)
+ model._converted_to_transformer_engine = True
+ model._original_forward = model.forward
+
+ kwargs = self.fp8_recipe_handler.to_kwargs() if self.fp8_recipe_handler is not None else {}
+ if "fp8_format" in kwargs:
+ kwargs["fp8_format"] = getattr(te_recipe.Format, kwargs["fp8_format"])
+ fp8_recipe = te_recipe.DelayedScaling(**kwargs)
+ model.forward = fp8_autocast(enabled=True, fp8_recipe=fp8_recipe)(model.forward)
+
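+ # Models quantized with bitsandbytes (8-bit/4-bit) that carry an `hf_device_map` get extra validation below:
+ # no distributed training when sharded across multiple devices, no device mismatch, and no CPU/disk offload.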
+ if (getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False)) and getattr(
+ model, "hf_device_map", False
+ ):
+ model_devices = set(model.hf_device_map.values())
+ if len(model_devices) > 1 and self.distributed_type != DistributedType.NO:
+ raise ValueError(
+ "You can't train a model that has been loaded in 8-bit precision on multiple devices in any distributed mode."
+ " In order to use 8-bit models that have been loaded across multiple GPUs the solution is to use Naive Pipeline Parallelism."
+ " Therefore you should not specify that you are under any distributed regime in your accelerate config."
+ )
+ current_device = list(model_devices)[0]
+ current_device_index = current_device.index if isinstance(current_device, torch.device) else current_device
+
+ if torch.device(current_device_index) != self.device:
+ # if on the first device (GPU 0) we don't care
+ if (self.device.index is not None) or (current_device_index != 0):
+ raise ValueError(
+ "You can't train a model that has been loaded in 8-bit precision on a different device than the one "
+ "you're training on. Make sure you loaded the model on the correct device using for example `device_map={'':torch.cuda.current_device() or device_map={'':torch.xpu.current_device()}"
+ )
+
+ if "cpu" in model_devices or "disk" in model_devices:
+ raise ValueError(
+ "You can't train a model that has been loaded in 8-bit precision with CPU or disk offload."
+ )
+ elif device_placement and not self.verify_device_map(model):
+ model = model.to(self.device)
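+ # Unless we are only preparing for evaluation, wrap the model for the active backend (DDP, FSDP, XLA, ...).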
+ if not evaluation_mode:
+ if self.distributed_type in (
+ DistributedType.MULTI_GPU,
+ DistributedType.MULTI_MLU,
+ DistributedType.MULTI_NPU,
+ DistributedType.MULTI_XPU,
+ ):
+ if any(p.requires_grad for p in model.parameters()):
+ kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}
+ # TODO: Look at enabling native TP training directly with a proper config
+ if os.environ.get("ACCELERATE_BYPASS_DEVICE_MAP", "false") != "true":
+ device_ids, output_device = [self.local_process_index], self.local_process_index
+ else:
+ device_ids, output_device = None, None
+
+ model = torch.nn.parallel.DistributedDataParallel(
+ model, device_ids=device_ids, output_device=output_device, **kwargs
+ )
+ elif self.distributed_type == DistributedType.FSDP:
+ from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
+
+ # Check if the model is already a FSDP model due to `Manual Wrapping` and if so,
+ # don't wrap it again
+ # In case the model is already compiled using PyTorch 2.0 and the wrapped model in it
+ # is a FSDP model, don't wrap it again
+ is_type_fsdp = isinstance(model, FSDP) or (
+ is_compiled_module(model) and isinstance(model._orig_mod, FSDP)
+ )
+
+ if not is_type_fsdp:
+ self.state.fsdp_plugin.set_auto_wrap_policy(model)
+ fsdp_plugin = self.state.fsdp_plugin
+ kwargs = {
+ "sharding_strategy": fsdp_plugin.sharding_strategy,
+ "cpu_offload": fsdp_plugin.cpu_offload,
+ "auto_wrap_policy": fsdp_plugin.auto_wrap_policy,
+ "mixed_precision": fsdp_plugin.mixed_precision_policy,
+ "sync_module_states": fsdp_plugin.sync_module_states,
+ "backward_prefetch": fsdp_plugin.backward_prefetch,
+ "forward_prefetch": fsdp_plugin.forward_prefetch,
+ "use_orig_params": fsdp_plugin.use_orig_params,
+ "param_init_fn": fsdp_plugin.param_init_fn,
+ "ignored_modules": fsdp_plugin.ignored_modules,
+ "limit_all_gathers": fsdp_plugin.limit_all_gathers,
+ "device_id": self.device,
+ }
+ model = FSDP(model, **kwargs)
+ if fsdp_plugin.activation_checkpointing:
+ from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
+ CheckpointImpl,
+ apply_activation_checkpointing,
+ checkpoint_wrapper,
+ )
+
+ apply_activation_checkpointing(
+ model,
+ checkpoint_wrapper_fn=functools.partial(
+ checkpoint_wrapper,
+ checkpoint_impl=CheckpointImpl.NO_REENTRANT,
+ ),
+ auto_wrap_policy=fsdp_plugin.auto_wrap_policy,
+ )
+ # If the previous and current models are the same, delete the previous one
+ if len(self._models) > 1 and (self._models[-2] is self._models[-1]):
+ del self._models[-2]
+ self._models[-1] = model
+ elif self.distributed_type == DistributedType.MULTI_CPU:
+ kwargs = self.ddp_handler.to_kwargs() if self.ddp_handler is not None else {}
+ model = torch.nn.parallel.DistributedDataParallel(model, **kwargs)
+ elif self.distributed_type == DistributedType.XLA and self.state.fork_launched:
+ model = xmp.MpModelWrapper(model).to(self.device)
+ # torch.compile should be called last and only if the model isn't already compiled.
+ if self.state.dynamo_plugin.backend != DynamoBackend.NO and not is_compiled_module(model):
+ if not is_torch_version(">=", "2.0"):
+ raise ValueError("Using `torch.compile` requires PyTorch 2.0 or higher.")
+ model = torch.compile(model, **self.state.dynamo_plugin.to_kwargs())
+ return model
+
+ def _prepare_deepspeed(self, *args):
+ import deepspeed
+
+ deepspeed_plugin = self.state.deepspeed_plugin
+
+ is_dataloader_present = any(isinstance(obj, torch.utils.data.DataLoader) for obj in args)
+ result = [
+ self._prepare_one(obj, first_pass=True) if isinstance(obj, torch.utils.data.DataLoader) else obj
+ for obj in args
+ ]
+
+ if deepspeed_plugin.is_auto("train_micro_batch_size_per_gpu"):
+ if is_dataloader_present:
+ batch_sizes = [obj.batch_size for obj in args if hasattr(obj, "batch_size")]
+ if any(bs is None for bs in batch_sizes):
+ raise ValueError(
+ "At least one of the dataloaders passed to `accelerate.prepare()` has `None` as batch size. "
+ "Please set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file "
+ "or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`."
+ )
+ if self.split_batches:
+ batch_sizes = [batch_size // self.num_processes for batch_size in batch_sizes]
+
+ batch_size_per_device = min(batch_sizes) if deepspeed_plugin.is_train_batch_min else max(batch_sizes)
+ if len(batch_sizes) > 1:
+ logger.info(
+ "Since you passed both train and evaluation dataloader, `is_train_batch_min` (here "
+ f"{deepspeed_plugin.is_train_batch_min} will decide the `train_batch_size` ({batch_size_per_device})."
+ )
+ else:
+ raise ValueError(
+ "When using DeepSpeed, `accelerate.prepare()` requires you to pass at least one of training or evaluation dataloaders "
+ "with `batch_size` attribute returning an integer value "
+ "or alternatively set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file "
+ "or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`."
+ )
+ else:
+ batch_size_per_device = deepspeed_plugin.get_value("train_micro_batch_size_per_gpu")
+
+ # handle `gradient_accumulation_steps` when the value is `auto`
+ deepspeed_plugin.fill_match(
+ "gradient_accumulation_steps",
+ must_match=False,
+ gradient_accumulation_steps=self.gradient_accumulation_steps,
+ )
+
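+ # Values handed to `deepspeed_config_process` below to fill `auto`/missing entries in the DeepSpeed config;
+ # `train_batch_size` = micro-batch size * gradient accumulation steps * number of processes.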
+ config_kwargs = {
+ "train_micro_batch_size_per_gpu": batch_size_per_device,
+ "train_batch_size": batch_size_per_device
+ * deepspeed_plugin.get_value("gradient_accumulation_steps")
+ * self.num_processes,
+ "gradient_clipping": 1.0,
+ "zero_optimization.stage3_gather_16bit_weights_on_model_save": False,
+ }
+
+ model = None
+ optimizer = None
+ scheduler = None
+ for obj in result:
+ if isinstance(obj, torch.nn.Module):
+ model = obj
+ elif isinstance(obj, (torch.optim.Optimizer, DummyOptim)):
+ optimizer = obj
+ elif (isinstance(obj, (LRScheduler, DummyScheduler))) or (
+ type(obj).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES
+ ):
+ scheduler = obj
+
+ if optimizer is not None:
+ if "optimizer" in deepspeed_plugin.deepspeed_config and not isinstance(optimizer, (DummyOptim)):
+ raise ValueError(
+ "You cannot specify an optimizer in the config file and in the code at the same time. "
+ "Please remove the optimizer from the config file or "
+ "create `accelerate.utils.DummyOptim` in the code."
+ )
+ elif "optimizer" not in deepspeed_plugin.deepspeed_config and isinstance(optimizer, (DummyOptim)):
+ raise ValueError(
+ "You cannot create a `DummyOptim` without specifying an optimizer in the config file."
+ )
+
+ if isinstance(optimizer, (torch.optim.Optimizer)):
+ deepspeed_plugin.deepspeed_config["zero_allow_untested_optimizer"] = True
+
+ if scheduler is not None:
+ if "scheduler" in deepspeed_plugin.deepspeed_config and not isinstance(scheduler, (DummyScheduler)):
+ raise ValueError(
+ "You cannot specify a scheduler in the config file and in the code at the same time. "
+ "Please remove the scheduler from the config file or "
+ "create `accelerate.utils.DummyScheduler` in the code."
+ )
+ elif (
+ "scheduler" not in deepspeed_plugin.deepspeed_config
+ and isinstance(scheduler, (DummyScheduler))
+ and scheduler.lr_scheduler_callable is None
+ ):
+ raise ValueError(
+ "Either specify a scheduler in the config file or "
+ "pass in the `lr_scheduler_callable` parameter when using `accelerate.utils.DummyScheduler`."
+ )
+
+ if optimizer is not None and scheduler is not None:
+ if isinstance(optimizer, (DummyOptim)) and not isinstance(scheduler, (DummyScheduler)):
+ raise ValueError(
+ "You can only specify `accelerate.utils.DummyScheduler` in the code when using "
+ "`accelerate.utils.DummyOptim`."
+ )
+
+ if model is not None:
+ # deal with config keys that use `auto` value and rely on model's hidden_size
+ hidden_size_based_keys = [
+ "zero_optimization.reduce_bucket_size",
+ "zero_optimization.stage3_prefetch_bucket_size",
+ "zero_optimization.stage3_param_persistence_threshold",
+ ]
+ hidden_size_auto_keys = [x for x in hidden_size_based_keys if deepspeed_plugin.is_auto(x)]
+ if len(hidden_size_auto_keys) > 0:
+ reasoning = (
+ "therefore it's not possible to automatically fill out the following `auto` entries "
+ + f"in the DeepSpeed config file: {hidden_size_auto_keys}. You can fix that by replacing "
+ + "`auto` values for these keys with an integer value of your choice."
+ )
+ if not hasattr(model, "config"):
+ raise ValueError("Can't find `model.config` entry, " + reasoning)
+
+ if hasattr(model.config, "hidden_size"):
+ hidden_size = model.config.hidden_size
+ elif hasattr(model.config, "hidden_sizes"):
+ # if there are many hidden sizes pick the largest one
+ hidden_size = max(model.config.hidden_sizes)
+ else:
+ raise ValueError(
+ "Can find neither `model.config.hidden_size` nor `model.config.hidden_sizes`, " + reasoning
+ )
+
+ config_kwargs.update(
+ {
+ "zero_optimization.reduce_bucket_size": hidden_size * hidden_size,
+ "zero_optimization.stage3_prefetch_bucket_size": 0.9 * hidden_size * hidden_size,
+ "zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size,
+ }
+ )
+
+ if isinstance(optimizer, (DummyOptim)):
+ config_kwargs.update(
+ {"optimizer.params.lr": optimizer.lr, "optimizer.params.weight_decay": optimizer.weight_decay}
+ )
+ if isinstance(scheduler, (DummyScheduler)) and scheduler.lr_scheduler_callable is None:
+ max_lr = (
+ getattr(scheduler.optimizer, "lr", None)
+ if getattr(scheduler.optimizer, "defaults", None) is None
+ else scheduler.optimizer.defaults["lr"]
+ )
+ config_kwargs.update(
+ {
+ "scheduler.params.warmup_min_lr": 0,
+ "scheduler.params.warmup_max_lr": max_lr,
+ "scheduler.params.warmup_num_steps": scheduler.warmup_num_steps,
+ }
+ )
+ if scheduler.total_num_steps is not None:
+ config_kwargs["scheduler.params.total_num_steps"] = (
+ math.ceil(scheduler.total_num_steps / self.num_processes)
+ if not self.split_batches
+ else scheduler.total_num_steps
+ )
+ deepspeed_plugin.deepspeed_config_process(must_match=False, **config_kwargs)
+ self.deepspeed_config = deepspeed_plugin.deepspeed_config
+ kwargs = dict(model=model, config_params=self.deepspeed_config)
+ if optimizer is not None:
+ if isinstance(optimizer, (DummyOptim)):
+ kwargs["model_parameters"] = optimizer.params
+ if isinstance(scheduler, (DummyScheduler)) and scheduler.lr_scheduler_callable is not None:
+ kwargs["lr_scheduler"] = scheduler.lr_scheduler_callable
+ else:
+ if self.deepspeed_config["zero_optimization"].get("offload_optimizer", {}).get(
+ "device", "none"
+ ) != "none" and self.deepspeed_config.get("zero_force_ds_cpu_optimizer", True):
+ from deepspeed.ops.adam import DeepSpeedCPUAdam
+
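+ # ZeRO is offloading the optimizer to CPU (and `zero_force_ds_cpu_optimizer` is not disabled), so swap the
+ # user's optimizer for DeepSpeedCPUAdam, keeping only its lr and weight_decay defaults.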
+ defaults = {k: v for k, v in optimizer.defaults.items() if k in ["lr", "weight_decay"]}
+ optimizer = DeepSpeedCPUAdam(optimizer.param_groups, **defaults)
+ kwargs["optimizer"] = optimizer
+ if scheduler is not None:
+ if type(scheduler).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES:
+ kwargs["lr_scheduler"] = scheduler
+
+ engine, optimizer, _, lr_scheduler = deepspeed.initialize(**kwargs)
+ if optimizer is not None:
+ optimizer = DeepSpeedOptimizerWrapper(optimizer)
+ if scheduler is not None:
+ if lr_scheduler is None:
+ scheduler = AcceleratedScheduler(
+ scheduler,
+ optimizer,
+ step_with_optimizer=self.step_scheduler_with_optimizer,
+ split_batches=self.split_batches,
+ )
+ else:
+ scheduler = DeepSpeedSchedulerWrapper(lr_scheduler, optimizer)
+
+ for i in range(len(result)):
+ if isinstance(result[i], torch.nn.Module):
+ result[i] = engine
+ elif isinstance(result[i], (torch.optim.Optimizer, DummyOptim)):
+ result[i] = optimizer
+ elif (isinstance(result[i], (LRScheduler, DummyScheduler))) or (
+ type(result[i]).__name__ in deepspeed.runtime.lr_schedules.VALID_LR_SCHEDULES
+ ):
+ result[i] = scheduler
+ # Keep a reference so `accelerator.backward()` can dispatch to `deepspeed_engine_wrapped.backward()`
+ self.deepspeed_engine_wrapped = DeepSpeedEngineWrapper(engine)
+ self._models.append(engine)
+ if optimizer is not None:
+ self._optimizers.append(optimizer)
+ if scheduler is not None:
+ self._schedulers.append(scheduler)
+ if len(self._models) > 1:
+ raise AssertionError(
+ "You can't use same `Accelerator()` instance with multiple models when using DeepSpeed"
+ )
+ return tuple(result)
+
+ def _prepare_megatron_lm(self, *args):
+ megatron_lm_plugin = self.state.megatron_lm_plugin
+ if not megatron_lm_plugin.megatron_dataset_flag:
+ batch_sizes = [obj.batch_size for obj in args if hasattr(obj, "batch_size")]
+ if len(batch_sizes) == 0:
+ raise ValueError(
+ "You must specify a training or evaluation dataloader in `accelerate.prepare()` when using Megatron-LM."
+ )
+
+ micro_batch_size = min(batch_sizes) if megatron_lm_plugin.is_train_batch_min else max(batch_sizes)
+ if len(batch_sizes) > 1:
+ logger.info(
+ "Since you passed both train and evaluation dataloader, `is_train_batch_min` (here "
+ f"{megatron_lm_plugin.is_train_batch_min} will decide the `train_batch_size` ({micro_batch_size})."
+ )
+ else:
+ for obj in args:
+ if isinstance(obj, MegatronLMDummyDataLoader):
+ micro_batch_size = obj.dataset_args["micro_batch_size"]
+ break
+
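+ # Data-parallel degree = total processes / (tensor-parallel degree * pipeline-parallel degree).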
+ dp_degree = self.num_processes // (megatron_lm_plugin.tp_degree * megatron_lm_plugin.pp_degree)
+ megatron_lm_plugin.set_training_args(micro_batch_size, dp_degree)
+
+ model = None
+ optimizer = None
+ scheduler = None
+ is_dummy_scheduler = False
+ batch_data = None
+ for obj in args:
+ if isinstance(obj, torch.utils.data.DataLoader) and batch_data is None:
+ batch_data = next(iter(obj))
+ if isinstance(obj, torch.nn.Module):
+ model = obj
+ elif isinstance(obj, (torch.optim.Optimizer)):
+ optimizer = obj
+ elif isinstance(obj, (LRScheduler, MegatronLMDummyScheduler)):
+ scheduler = obj
+
+ if model is not None:
+ megatron_lm_plugin.set_network_size_args(model, batch_data)
+ if optimizer is not None:
+ megatron_lm_plugin.set_optimizer_type(optimizer)
+ if scheduler is not None:
+ is_dummy_scheduler = isinstance(scheduler, MegatronLMDummyScheduler)
+ if not is_dummy_scheduler:
+ raise ValueError(
+ "You can't use a custom scheduler with Megatron-LM. Please use the `accelerate.utils.MegatronLMDummyScheduler` instead."
+ )
+ megatron_lm_plugin.set_scheduler_args(scheduler)
+
+ # initialize megatron-lm
+ megatron_lm_initialize(self, args_defaults=megatron_lm_plugin.megatron_lm_default_args)
+ counter = 0
+ result = []
+ for obj in args:
+ if isinstance(obj, torch.utils.data.DataLoader):
+ result.append(megatron_lm_prepare_data_loader(self, obj))
+ counter += 1
+ elif isinstance(obj, MegatronLMDummyDataLoader):
+ if counter == 0:
+ obj.set_megatron_data_args()
+ dataloaders = megatron_lm_prepare_data_loader(self, obj)
+ result.append(dataloaders[counter])
+ counter += 1
+ else:
+ result.append(obj)
+
+ if model is not None:
+ model = megatron_lm_prepare_model(self)
+ if optimizer is not None:
+ optimizer = megatron_lm_prepare_optimizer(self, model)
+ if scheduler is not None:
+ scheduler = megatron_lm_prepare_scheduler(self, optimizer, scheduler)
+
+ if model is not None:
+ model = MegatronEngine(self, model, optimizer, scheduler)
+ if optimizer is not None:
+ optimizer = MegatronLMOptimizerWrapper(optimizer)
+ if scheduler is not None:
+ scheduler = MegatronLMSchedulerWrapper(scheduler, optimizer)
+
+ for i in range(len(result)):
+ if isinstance(result[i], torch.nn.Module):
+ result[i] = model
+ elif isinstance(result[i], torch.optim.Optimizer):
+ result[i] = optimizer
+ elif isinstance(result[i], MegatronLMDummyScheduler):
+ result[i] = scheduler
+ if model is not None:
+ self._models.append(model)
+ if optimizer is not None:
+ self._optimizers.append(optimizer)
+ if scheduler is not None:
+ self._schedulers.append(scheduler)
+ if len(self._models) > 1:
+ raise AssertionError(
+ "You can't use same `Accelerator()` instance with multiple models when using Megatron-LM"
+ )
+ return tuple(result)
+
+ def _prepare_ipex(self, *args):
+ if not is_ipex_available():
+ raise ImportError(
+ "IPEX is not installed or IPEX's version does not match current PyTorch version. Please refer"
+ " to https://github.com/intel/intel-extension-for-pytorch."
+ )
+ else:
+ import intel_extension_for_pytorch as ipex
+
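+ # Find the model and optimizer among the passed objects, then let IPEX (or torch.xpu on XPU devices)
+ # optimize them in place, using bf16 when mixed precision is set to "bf16".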
+ model = None
+ optimizer = None
+ result = [obj for obj in args]
+ for obj in result:
+ if isinstance(obj, torch.nn.Module):
+ model = obj
+ model.train()
+ elif isinstance(obj, (torch.optim.Optimizer)):
+ optimizer = obj
+ if optimizer is not None and model is not None:
+ dtype = torch.bfloat16 if self.state.mixed_precision == "bf16" else None
+ if self.device.type == "xpu" and is_xpu_available():
+ model = model.to(self.device)
+ model, optimizer = torch.xpu.optimize(
+ model, optimizer=optimizer, dtype=dtype, inplace=True, level="O1"
+ )
+ else:
+ model, optimizer = ipex.optimize(model, optimizer=optimizer, dtype=dtype, inplace=True, level="O1")
+ for i in range(len(result)):
+ if isinstance(result[i], torch.nn.Module):
+ result[i] = model
+ elif isinstance(result[i], (torch.optim.Optimizer)):
+ result[i] = optimizer
+ return tuple(result)
+
+ def _prepare_msamp(self, *args):
+ if not is_msamp_available():
+ raise ImportError(
+ "MS-AMP was not found on your system. Please ensure that MS-AMP is available "
+ " or choose `'te'` as the backend for FP8 mixed precision training."
+ )
+ else:
+ import msamp
+
+ model, optimizer = None, None
+ num_models, num_optimizers = 0, 0
+ result = [obj for obj in args]
+ for obj in result:
+ if isinstance(obj, torch.nn.Module):
+ model = obj
+ num_models += 1
+ elif isinstance(obj, (torch.optim.Optimizer)):
+ optimizer = obj
+ num_optimizers += 1
+ if optimizer is None or model is None:
+ raise ValueError(
+ "You must pass a model and an optimizer together to `accelerate.prepare()` when using MS-AMP."
+ )
+ elif num_models > 1 or num_optimizers > 1:
+ raise ValueError(
+ f"You can't use multiple models ({num_models}) or optimizers {num_optimizers} with MS-AMP."
+ )
+ else:
+ model, optimizer = msamp.initialize(model, optimizer, opt_level=self.fp8_recipe_handler.opt_level)
+ for i in range(len(result)):
+ if isinstance(result[i], torch.nn.Module):
+ result[i] = model
+ elif isinstance(result[i], (torch.optim.Optimizer)):
+ result[i] = optimizer
+ return tuple(result)
+
+ def prepare_data_loader(
+ self, data_loader: torch.utils.data.DataLoader, device_placement=None, slice_fn_for_dispatch=None
+ ):
+ """
+ Prepares a PyTorch DataLoader for training in any distributed setup. It is recommended to use
+ [`Accelerator.prepare`] instead.
+
+ Args:
+ data_loader (`torch.utils.data.DataLoader`):
+ A vanilla PyTorch DataLoader to prepare
+ device_placement (`bool`, *optional*):
+ Whether or not to place the batches on the proper device in the prepared dataloader. Will default to
+ `self.device_placement`.
+ slice_fn_for_dispatch (`Callable`, *optional*):
+ If passed, this function will be used to slice tensors across `num_processes`. Will default to
+ [`~utils.slice_tensors`]. This argument is used only when `dispatch_batches` is set to `True` and will
+ be ignored otherwise.
+
+ Example:
+
+ ```python
+ >>> import torch
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> data_loader = torch.utils.data.DataLoader(...)
+ >>> data_loader = accelerator.prepare_data_loader(data_loader, device_placement=True)
+ ```
+ """
+ # Ensure we can't double wrap a DataLoader due to `find_batch_size`
+ if getattr(data_loader, "_is_accelerate_prepared", False):
+ if data_loader not in self._dataloaders:
+ self._dataloaders.append(data_loader)
+ return data_loader
+ if device_placement is None:
+ device_placement = self.device_placement if self.distributed_type != DistributedType.XLA else False
+ prepared_data_loader = prepare_data_loader(
+ data_loader,
+ self.device,
+ num_processes=self.num_processes,
+ process_index=self.process_index,
+ split_batches=self.split_batches,
+ put_on_device=device_placement,
+ rng_types=self.rng_types.copy(),
+ dispatch_batches=self.dispatch_batches,
+ even_batches=self.even_batches,
+ slice_fn_for_dispatch=slice_fn_for_dispatch,
+ use_seedable_sampler=self.use_seedable_sampler,
+ )
+ self._dataloaders.append(prepared_data_loader)
+ return prepared_data_loader
+
+ def prepare_optimizer(self, optimizer: torch.optim.Optimizer, device_placement=None):
+ """
+ Prepares a PyTorch Optimizer for training in any distributed setup. It is recommended to use
+ [`Accelerator.prepare`] instead.
+
+ Args:
+ optimizer (`torch.optim.Optimizer`):
+ A vanilla PyTorch optimizer to prepare
+ device_placement (`bool`, *optional*):
+ Whether or not to place the optimizer on the proper device. Will default to `self.device_placement`.
+
+ Example:
+
+ ```python
+ >>> import torch
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> optimizer = torch.optim.Adam(...)
+ >>> optimizer = accelerator.prepare_optimizer(optimizer, device_placement=True)
+ ```
+ """
+ # Ensure we can't double wrap an optimizer due to `find_batch_size`
+ if getattr(optimizer, "_is_accelerate_prepared", False):
+ if optimizer not in self._optimizers:
+ self._optimizers.append(optimizer)
+ return optimizer
+ if device_placement is None:
+ device_placement = self.device_placement
+ optimizer = AcceleratedOptimizer(optimizer, device_placement=device_placement, scaler=self.scaler)
+ self._optimizers.append(optimizer)
+ return optimizer
+
+ def prepare_scheduler(self, scheduler: LRScheduler):
+ """
+ Prepares a PyTorch Scheduler for training in any distributed setup. It is recommended to use
+ [`Accelerator.prepare`] instead.
+
+ Args:
+ scheduler (`torch.optim.lr_scheduler.LRScheduler`):
+ A vanilla PyTorch scheduler to prepare
+
+ Example:
+
+ ```python
+ >>> import torch
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> optimizer = torch.optim.Adam(...)
+ >>> scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, ...)
+ >>> scheduler = accelerator.prepare_scheduler(scheduler)
+ ```
+ """
+ # Ensure we can't double wrap a scheduler due to `find_batch_size`
+ if getattr(scheduler, "_is_accelerate_prepared", False):
+ if scheduler not in self._schedulers:
+ self._schedulers.append(scheduler)
+ return scheduler
+ # We try to find the optimizer associated with `scheduler`, the default is the full list.
+ optimizer = self._optimizers
+ for opt in self._optimizers:
+ if getattr(scheduler, "optimizer", None) == opt.optimizer:
+ optimizer = opt
+ break
+ scheduler = AcceleratedScheduler(
+ scheduler,
+ optimizer,
+ step_with_optimizer=self.step_scheduler_with_optimizer,
+ split_batches=self.split_batches,
+ )
+ self._schedulers.append(scheduler)
+ return scheduler
+
+ def backward(self, loss, **kwargs):
+ """
+ Scales the gradients in accordance with the `GradientAccumulationPlugin` and calls the correct `backward()` based
+ on the configuration.
+
+ Should be used in lieu of `loss.backward()`.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator(gradient_accumulation_steps=2)
+ >>> outputs = model(inputs)
+ >>> loss = loss_fn(outputs, labels)
+ >>> accelerator.backward(loss)
+ ```
+ """
+ if self.distributed_type != DistributedType.DEEPSPEED:
+ # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
+ loss = loss / self.gradient_accumulation_steps
+ if self.distributed_type == DistributedType.DEEPSPEED:
+ self.deepspeed_engine_wrapped.backward(loss, **kwargs)
+ elif self.distributed_type == DistributedType.MEGATRON_LM:
+ return
+ elif self.scaler is not None:
+ self.scaler.scale(loss).backward(**kwargs)
+ else:
+ loss.backward(**kwargs)
+
+ def set_trigger(self):
+ """
+ Sets the internal trigger tensor to 1 on the current process. A later check with
+ [`Accelerator.check_trigger`] should follow, which will check across all processes.
+
+ Note:
+ Does not require `wait_for_everyone()`
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> # Assume later in the training script
+ >>> # `should_do_breakpoint` is a custom function to monitor when to break,
+ >>> # e.g. when the loss is NaN
+ >>> if should_do_breakpoint(loss):
+ ... accelerator.set_trigger()
+ >>> # Assume later in the training script
+ >>> if accelerator.check_trigger():
+ ... break
+ ```
+ """
+ self.flag_tensor = torch.tensor(1, device=self.device)
+
+ def check_trigger(self):
+ """
+ Checks if the internal trigger tensor has been set to 1 in any of the processes. If so, will return `True` and
+ reset the trigger tensor to 0.
+
+ Note:
+ Does not require `wait_for_everyone()`
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> # Assume later in the training script
+ >>> # `should_do_breakpoint` is a custom function to monitor when to break,
+ >>> # e.g. when the loss is NaN
+ >>> if should_do_breakpoint(loss):
+ ... accelerator.set_trigger()
+ >>> # Assume later in the training script
+ >>> if accelerator.check_trigger():
+ ... break
+ ```
+ """
+ # Now that we are outside `__init__`, we can initialize it if it is `None` on device
+ if self.flag_tensor is None:
+ self.flag_tensor = torch.tensor(0, device=self.device)
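+ # Summing the flag across processes: any process that called `set_trigger` makes the result >= 1.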
+ flag_tensor = self.reduce(self.flag_tensor)
+ if flag_tensor.item() >= 1:
+ self.flag_tensor = torch.tensor(0, device=self.device)
+ return True
+ return False
+
+ def unscale_gradients(self, optimizer=None):
+ """
+ Unscale the gradients in mixed precision training with AMP. This is a noop in all other settings.
+
+ Likely should be called through [`Accelerator.clip_grad_norm_`] or [`Accelerator.clip_grad_value_`]
+
+ Args:
+ optimizer (`torch.optim.Optimizer` or `list[torch.optim.Optimizer]`, *optional*):
+ The optimizer(s) for which to unscale gradients. If not set, will unscale gradients on all optimizers
+ that were passed to [`~Accelerator.prepare`].
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> model, optimizer = accelerator.prepare(model, optimizer)
+ >>> outputs = model(inputs)
+ >>> loss = loss_fn(outputs, labels)
+ >>> accelerator.backward(loss)
+ >>> accelerator.unscale_gradients(optimizer=optimizer)
+ ```
+ """
+ if self.native_amp and self.mixed_precision == "fp16":
+ if optimizer is None:
+ # TODO: this unscales all optimizers, whereas we should only unscale the one(s) holding the relevant parameters.
+ optimizer = self._optimizers
+ elif not isinstance(optimizer, (tuple, list)):
+ optimizer = [optimizer]
+ for opt in optimizer:
+ while isinstance(opt, AcceleratedOptimizer):
+ opt = opt.optimizer
+ self.scaler.unscale_(opt)
+
+ def clip_grad_norm_(self, parameters, max_norm, norm_type=2):
+ """
+ Should be used in place of `torch.nn.utils.clip_grad_norm_`.
+
+ Returns:
+ `torch.Tensor`: Total norm of the parameter gradients (viewed as a single vector).
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator(gradient_accumulation_steps=2)
+ >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)
+
+ >>> for input, target in dataloader:
+ ... optimizer.zero_grad()
+ ... output = model(input)
+ ... loss = loss_func(output, target)
+ ... accelerator.backward(loss)
+ ... if accelerator.sync_gradients:
+ ... accelerator.clip_grad_norm_(model.parameters(), max_grad_norm)
+ ... optimizer.step()
+ ```
+ """
+ if self.distributed_type == DistributedType.FSDP:
+ self.unscale_gradients()
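+ # FSDP requires using the wrapped model's own `clip_grad_norm_`, so match the passed parameters
+ # against each prepared model's parameters.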
+ parameters = [p for p in parameters]
+ for model in self._models:
+ if parameters == [p for p in model.parameters()]:
+ return model.clip_grad_norm_(max_norm, norm_type)
+ elif self.distributed_type == DistributedType.DEEPSPEED:
+ # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
+ # We cannot return the gradient norm because DeepSpeed does it.
+ return None
+ elif self.distributed_type == DistributedType.XLA:
+ # Reduce gradients first for XLA
+ for acc_opt in self._optimizers:
+ if not acc_opt.gradient_state.is_xla_gradients_synced:
+ opt = acc_opt
+ while isinstance(opt, AcceleratedOptimizer):
+ opt = opt.optimizer
+ gradients = xm._fetch_gradients(opt)
+ # Use xm.all_reduce to perform an in-place all-reduce. Recursively all-reducing each tensor
+ # one by one via self.reduce would not be in-place.
+ xm.all_reduce("sum", gradients, scale=1.0 / self.num_processes)
+ # Set is_xla_gradients_synced to True to avoid all-reduce twice in the AcceleratedOptimizer step.
+ acc_opt.gradient_state.is_xla_gradients_synced = True
+ self.unscale_gradients()
+ return torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=norm_type)
+
+ def clip_grad_value_(self, parameters, clip_value):
+ """
+ Should be used in place of `torch.nn.utils.clip_grad_value_`.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator(gradient_accumulation_steps=2)
+ >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)
+
+ >>> for input, target in dataloader:
+ ... optimizer.zero_grad()
+ ... output = model(input)
+ ... loss = loss_func(output, target)
+ ... accelerator.backward(loss)
+ ... if accelerator.sync_gradients:
+ ... accelerator.clip_grad_value_(model.parameters(), clip_value)
+ ... optimizer.step()
+ ```
+ """
+ if self.distributed_type in [DistributedType.DEEPSPEED, DistributedType.FSDP]:
+ raise Exception("DeepSpeed and FSDP do not support `clip_grad_value_`. Use `clip_grad_norm_` instead.")
+ self.unscale_gradients()
+ torch.nn.utils.clip_grad_value_(parameters, clip_value)
+
+ def gather(self, tensor):
+ """
+ Gather the values in *tensor* across all processes and concatenate them on the first dimension. Useful to
+ regroup the predictions from all processes when doing evaluation.
+
+ Note:
+ This gather happens in all processes.
+
+ Args:
+ tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`):
+ The tensors to gather across all processes.
+
+ Returns:
+ `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`: The gathered tensor(s). Note that the
+ first dimension of the result is *num_processes* multiplied by the first dimension of the input tensors.
+
+ Example:
+
+ ```python
+ >>> # Assuming four processes
+ >>> import torch
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> process_tensor = torch.tensor([accelerator.process_index])
+ >>> gathered_tensor = accelerator.gather(process_tensor)
+ >>> gathered_tensor
+ tensor([0, 1, 2, 3])
+ ```
+ """
+ return gather(tensor)
+
+ def gather_for_metrics(self, input_data):
+ """
+ Gathers `input_data` and potentially drops duplicates in the last batch if on a distributed system. Should be
+ used for gathering the inputs and targets for metric calculation.
+
+ Args:
+ input_data (`torch.Tensor`, `object`, a nested tuple/list/dictionary of `torch.Tensor`, or a nested tuple/list/dictionary of `object`):
+ The tensors or objects for calculating metrics across all processes
+
+ Example:
+
+ ```python
+ >>> # Assuming two processes, with a batch size of 5 on a dataset with 9 samples
+ >>> import torch
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> dataloader = torch.utils.data.DataLoader(range(9), batch_size=5)
+ >>> dataloader = accelerator.prepare(dataloader)
+ >>> batch = next(iter(dataloader))
+ >>> gathered_items = accelerator.gather_for_metrics(batch)
+ >>> len(gathered_items)
+ 9
+ ```
+ """
+
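+ # Detect whether everything passed is made of tensors; if not, gather as generic Python objects instead.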
+ try:
+ recursively_apply(lambda x: x, input_data, error_on_other_type=True)
+ all_tensors = True
+ except TypeError:
+ all_tensors = False
+
+ if not all_tensors:
+ data = gather_object(input_data)
+ else:
+ data = self.gather(input_data)
+
+ try:
+ if self.gradient_state.end_of_dataloader:
+ # At the end of a dataloader, `gather_for_metrics` regresses to `gather`
+ # unless the dataset has a remainder that needs to be dropped.
+ if self.gradient_state.remainder == -1:
+ logger.info(
+ "The used dataset had no length, returning gathered tensors. You should drop the remainder yourself."
+ )
+ return data
+ elif self.gradient_state.remainder > 0:
+ # Last batch needs to be truncated on distributed systems as it contains additional samples
+ def _adjust_samples(tensor):
+ return tensor[: self.gradient_state.remainder]
+
+ return recursively_apply(_adjust_samples, data)
+ else: # remainder is 0
+ # no remainder even though at end of dataloader, so nothing to do.
+ return data
+ else:
+ # Not at the end of the dataloader, no need to adjust the tensors
+ return data
+ except Exception:
+ # Dataset had no length or raised an error
+ return data
+
+ def reduce(self, tensor, reduction="sum", scale=1.0):
+ """
+ Reduce the values in *tensor* across all processes based on *reduction*.
+
+ Note:
+ All processes get the reduced value.
+
+ Args:
+ tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`):
+ The tensors to reduce across all processes.
+ reduction (`str`, *optional*, defaults to "sum"):
+ A reduction type, can be one of 'sum', 'mean', or 'none'. If 'none', will not perform any operation.
+ scale (`float`, *optional*, defaults to 1.0):
+ A default scaling value to be applied after the reduce, only valid on XLA.
+
+ Returns:
+ `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`:
+ The reduced tensor(s).
+
+ Example:
+
+ ```python
+ >>> # Assuming two processes
+ >>> import torch
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> process_tensor = torch.arange(accelerator.num_processes) + 1 + (2 * accelerator.process_index)
+ >>> process_tensor = process_tensor.to(accelerator.device)
+ >>> reduced_tensor = accelerator.reduce(process_tensor, reduction="sum")
+ >>> reduced_tensor
+ tensor([4, 6])
+ ```
+ """
+ return reduce(tensor, reduction, scale)
+
+ def pad_across_processes(self, tensor, dim=0, pad_index=0, pad_first=False):
+ """
+ Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so
+ they can safely be gathered.
+
+ Args:
+ tensor (nested list/tuple/dictionary of `torch.Tensor`):
+ The data to gather.
+ dim (`int`, *optional*, defaults to 0):
+ The dimension on which to pad.
+ pad_index (`int`, *optional*, defaults to 0):
+ The value with which to pad.
+ pad_first (`bool`, *optional*, defaults to `False`):
+ Whether to pad at the beginning or the end.
+
+ Returns:
+ `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`:
+ The padded tensor(s).
+
+ Example:
+
+ ```python
+ >>> # Assuming two processes, with the first processes having a tensor of size 1 and the second of size 2
+ >>> import torch
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> process_tensor = torch.arange(accelerator.process_index + 1).to(accelerator.device)
+ >>> padded_tensor = accelerator.pad_across_processes(process_tensor)
+ >>> padded_tensor.shape
+ torch.Size([2])
+ ```
+ """
+ return pad_across_processes(tensor, dim=dim, pad_index=pad_index, pad_first=pad_first)
+
+ def unwrap_model(self, model, keep_fp32_wrapper: bool = True):
+ """
+ Unwraps the `model` from the additional layer possible added by [`~Accelerator.prepare`]. Useful before saving
+ the model.
+
+ Args:
+ model (`torch.nn.Module`):
+ The model to unwrap.
+ keep_fp32_wrapper (`bool`, *optional*, defaults to `True`):
+ Whether to keep the mixed precision hook (if one was added) instead of removing it.
+
+ Returns:
+ `torch.nn.Module`: The unwrapped model.
+
+ Example:
+
+ ```python
+ >>> # Assuming two GPU processes
+ >>> from torch.nn.parallel import DistributedDataParallel
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> model = accelerator.prepare(MyModel())
+ >>> print(model.__class__.__name__)
+ DistributedDataParallel
+
+ >>> model = accelerator.unwrap_model(model)
+ >>> print(model.__class__.__name__)
+ MyModel
+ ```
+ """
+ return extract_model_from_parallel(model, keep_fp32_wrapper)
+
+ def wait_for_everyone(self):
+ """
+ Will stop the execution of the current process until every other process has reached that point (so this does
+ nothing when the script is only run in one process). Useful to do before saving a model.
+
+ Example:
+
+ ```python
+ >>> # Assuming two GPU processes
+ >>> import time
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> if accelerator.is_main_process:
+ ... time.sleep(2)
+ ... else:
+ ... print("I'm waiting for the main process to finish its sleep...")
+ >>> accelerator.wait_for_everyone()
+ >>> # Should print on every process at the same time
+ >>> print("Everyone is here")
+ ```
+ """
+ wait_for_everyone()
+
+ @on_main_process
+ def init_trackers(self, project_name: str, config: dict | None = None, init_kwargs: dict | None = {}):
+ """
+ Initializes a run for all trackers stored in `self.log_with`, potentially with starting configurations
+
+ Args:
+ project_name (`str`):
+ The name of the project. All trackers will save their data based on this
+ config (`dict`, *optional*):
+ Optional starting configuration to be logged.
+ init_kwargs (`dict`, *optional*):
+ A nested dictionary of kwargs to be passed to a specific tracker's `__init__` function. Should be
+ formatted like so:
+ ```python
+ {"wandb": {"tags": ["tag_a", "tag_b"]}}
+ ```
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator(log_with="tensorboard")
+ >>> accelerator.init_trackers(
+ ... project_name="my_project",
+ ... config={"learning_rate": 0.001, "batch_size": 32},
+ ... init_kwargs={"tensorboard": {"flush_secs": 60}},
+ ... )
+ ```
+ """
+ for tracker in self.log_with:
+ if issubclass(type(tracker), GeneralTracker):
+ # Custom trackers are already initialized
+ self.trackers.append(tracker)
+ else:
+ tracker_init = LOGGER_TYPE_TO_CLASS[str(tracker)]
+ if tracker_init.requires_logging_directory:
+ # We can skip this check since it was done in `__init__`
+ self.trackers.append(
+ tracker_init(project_name, self.logging_dir, **init_kwargs.get(str(tracker), {}))
+ )
+ else:
+ self.trackers.append(tracker_init(project_name, **init_kwargs.get(str(tracker), {})))
+ if config is not None:
+ for tracker in self.trackers:
+ tracker.store_init_configuration(config)
+
+ def get_tracker(self, name: str, unwrap: bool = False):
+ """
+ Returns a `tracker` from `self.trackers` based on `name` on the main process only.
+
+ Args:
+ name (`str`):
+ The name of a tracker, corresponding to the `.name` property.
+ unwrap (`bool`):
+ Whether to return the tracker's internal tracking mechanism rather than the wrapped tracker object
+ (keeping the wrapper is recommended).
+
+ Returns:
+ `GeneralTracker`: The tracker corresponding to `name` if it exists.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator(log_with="tensorboard")
+ >>> accelerator.init_trackers("my_project")
+ >>> tensorboard_tracker = accelerator.get_tracker("tensorboard")
+ ```
+ """
+ if len(self.trackers) > 0:
+ for tracker in self.trackers:
+ if tracker.name == name:
+ return tracker.tracker if unwrap else tracker
+ raise ValueError(f"{name} is not an available tracker stored inside the `Accelerator`.")
+ # Handle tracker only made on main process
+ return GeneralTracker(_blank=True)
+
+ @on_main_process
+ def log(self, values: dict, step: int | None = None, log_kwargs: dict | None = {}):
+ """
+ Logs `values` to all stored trackers in `self.trackers` on the main process only.
+
+ Args:
+ values (`dict`):
+ Values should be a dictionary-like object containing only types `int`, `float`, or `str`.
+ step (`int`, *optional*):
+ The run step. If included, the log will be affiliated with this step.
+ log_kwargs (`dict`, *optional*):
+ A nested dictionary of kwargs to be passed to a specific tracker's `log` function. Should be formatted
+ like so:
+ ```python
+ {"wandb": {"tags": ["tag_a", "tag_b"]}}
+ ```
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator(log_with="tensorboard")
+ >>> accelerator.init_trackers("my_project")
+ >>> accelerator.log({"loss": 0.5, "accuracy": 0.9})
+ ```
+ """
+ for tracker in self.trackers:
+ tracker.log(values, step=step, **log_kwargs.get(tracker.name, {}))
+
+ @on_main_process
+ def end_training(self):
+ """
+ Runs any special end training behaviors, such as stopping trackers on the main process only. Should always be
+ called at the end of your script if using experiment tracking.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator(log_with="tensorboard")
+ >>> accelerator.init_trackers("my_project")
+ >>> # Do training
+ >>> accelerator.end_training()
+ ```
+ """
+ for tracker in self.trackers:
+ tracker.finish()
+
+ def save(self, obj, f, safe_serialization=False):
+ """
+ Save the object passed to disk once per machine. Use in place of `torch.save`.
+
+ Args:
+ obj (`object`): The object to save.
+ f (`str` or `os.PathLike`): Where to save the content of `obj`.
+ safe_serialization (`bool`, *optional*, defaults to `False`): Whether to save `obj` using `safetensors`
+
+ Note:
+ If `save_on_each_node` was set in the `ProjectConfiguration`, the object will be saved once per node
+ rather than only once on the main node.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> arr = [0, 1, 2, 3]
+ >>> accelerator.save(arr, "array.pkl")
+ ```
+ """
+ save(
+ obj,
+ f,
+ save_on_each_node=self.project_configuration.save_on_each_node,
+ safe_serialization=safe_serialization,
+ )
+
+ def save_model(
+ self,
+ model: torch.nn.Module,
+ save_directory: Union[str, os.PathLike],
+ max_shard_size: Union[int, str] = "10GB",
+ safe_serialization: bool = True,
+ ):
+ """
+ Save a model so that it can be re-loaded using [`load_checkpoint_in_model`].
+
+ Arguments:
+ model: (`torch.nn.Module`):
+ Model to be saved. The model can be wrapped or unwrapped.
+ save_directory (`str` or `os.PathLike`):
+ Directory to which to save. Will be created if it doesn't exist.
+ max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
+ The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be of a size
+ lower than this size. If expressed as a string, it needs to be digits followed by a unit (like `"5MB"`).
+
+
+
+ If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard
+ which will be bigger than `max_shard_size`.
+
+
+
+ safe_serialization (`bool`, *optional*, defaults to `True`):
+ Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> model = ...
+ >>> accelerator.save_model(model, save_directory)
+ ```
+ """
+
+ if os.path.isfile(save_directory):
+ logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
+ return
+
+ os.makedirs(save_directory, exist_ok=True)
+
+ # get the state_dict of the model
+ if any(
+ [
+ module._hf_hook.offload
+ for module in model.modules()
+ if hasattr(module, "_hf_hook") and isinstance(module._hf_hook, AlignDevicesHook)
+ ]
+ ):
+ state_dict = get_state_dict_offloaded_model(model)
+ else:
+ if any(param.device == torch.device("meta") for param in model.parameters()):
+ raise RuntimeError("You can't save the model since some parameters are on the meta device.")
+ state_dict = self.get_state_dict(model)
+
+ if safe_serialization:
+ state_dict = clean_state_dict_for_safetensors(state_dict)
+ weights_name = SAFE_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME
+
+ # Shard the model if it is too big.
+ shards, index = shard_checkpoint(state_dict, max_shard_size=max_shard_size, weights_name=weights_name)
+
+ # Clean the folder from a previous save
+ for filename in os.listdir(save_directory):
+ full_filename = os.path.join(save_directory, filename)
+ # If we have a shard file that is not going to be replaced, we delete it, but only from the main process
+ # in distributed settings to avoid race conditions.
+ weights_no_suffix = weights_name.replace(".bin", "")
+
+ # make sure that file to be deleted matches format of sharded file, e.g. pytorch_model-00001-of-00005
+ filename_no_suffix = filename.replace(".bin", "")
+ reg = re.compile(r"(.*?)-\d{5}-of-\d{5}")
+
+ if (
+ filename.startswith(weights_no_suffix)
+ and os.path.isfile(full_filename)
+ and filename not in shards.keys()
+ and reg.fullmatch(filename_no_suffix) is not None
+ and PartialState().is_main_process
+ ):
+ os.remove(full_filename)
+
+ # Save the model
+ for shard_file, shard in shards.items():
+ self.save(shard, os.path.join(save_directory, shard_file), safe_serialization=safe_serialization)
+
+ if index is None:
+ path_to_weights = os.path.join(save_directory, WEIGHTS_NAME)
+ logger.info(f"Model weights saved in {path_to_weights}")
+ else:
+ save_index_file = SAFE_WEIGHTS_INDEX_NAME if safe_serialization else WEIGHTS_INDEX_NAME
+ save_index_file = os.path.join(save_directory, save_index_file)
+ # Save the index as well
+ with open(save_index_file, "w", encoding="utf-8") as f:
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+ f.write(content)
+ logger.info(
+ f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be "
+ f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the "
+ f"index located at {save_index_file}."
+ )
+
+ def register_save_state_pre_hook(self, hook: Callable[..., None]) -> hooks.RemovableHandle:
+ """
+ Registers a pre hook to be run before `save_checkpoint` is called in [`Accelerator.save_state`].
+
+ Args:
+ hook (`Callable`):
+ A function to be called in [`Accelerator.save_state`] before `save_checkpoint`.
+
+ The hook should have the following signature:
+
+ `hook(models: list[torch.nn.Module], weights: list[dict[str, torch.Tensor]], input_dir: str) -> None`
+
+ The `models` argument is the list of models saved in the accelerator state under `accelerator._models`, the
+ `weights` argument is the list of state dicts of those models, and the `input_dir` argument receives the
+ `output_dir` passed to [`Accelerator.save_state`].
+
+
+
+ Should only be used in conjunction with [`Accelerator.register_load_state_pre_hook`]. Can be useful to save
+ configurations in addition to model weights. Can also be used to overwrite model saving with a customized
+ method. In this case, make sure to remove the weights you have already saved from the `weights` list.
+
+
+
+ Returns:
+ `torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling
+ `handle.remove()`
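+
+ Example (a minimal sketch; `save_extra_config` is a hypothetical hook written for illustration, not part of
+ Accelerate):
+
+ ```python
+ >>> import json, os
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+
+ >>> def save_extra_config(models, weights, output_dir):
+ ...     # Hypothetical hook: write extra metadata next to the checkpoint files.
+ ...     with open(os.path.join(output_dir, "extra_config.json"), "w") as f:
+ ...         json.dump({"num_models": len(models)}, f)
+
+ >>> handle = accelerator.register_save_state_pre_hook(save_extra_config)
+ >>> # ... call accelerator.save_state(...) as usual ...
+ >>> handle.remove()  # detach the hook when it is no longer needed
+ ```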
+ """
+ handle = hooks.RemovableHandle(self._save_model_state_pre_hook)
+ self._save_model_state_pre_hook[handle.id] = hook
+ return handle
+
+ def save_state(self, output_dir: str = None, safe_serialization: bool = True, **save_model_func_kwargs):
+ """
+ Saves the current states of the model, optimizer, scaler, RNG generators, and registered objects to a folder.
+
+ If a `ProjectConfiguration` was passed to the `Accelerator` object with `automatic_checkpoint_naming` enabled
+ then checkpoints will be saved to `self.project_dir/checkpoints`. If the number of current saves is greater
+ than `total_limit` then the oldest save is deleted. Each checkpoint is saved in a separate folder named
+ `checkpoint_<iteration>`.
+
+ Otherwise they are just saved to `output_dir`.
+
+
+
+ Should only be used when wanting to save a checkpoint during training and restoring the state in the same
+ environment.
+
+
+
+ Args:
+ output_dir (`str` or `os.PathLike`):
+ The name of the folder to save all relevant weights and states.
+ safe_serialization (`bool`, *optional*, defaults to `True`):
+ Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
+ save_model_func_kwargs (`dict`, *optional*):
+ Additional keyword arguments for saving model which can be passed to the underlying save function, such
+ as optional arguments for DeepSpeed's `save_checkpoint` function.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> model, optimizer, lr_scheduler = ...
+ >>> model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
+ >>> accelerator.save_state(output_dir="my_checkpoint")
+ ```
+ """
+ if self.project_configuration.automatic_checkpoint_naming:
+ output_dir = os.path.join(self.project_dir, "checkpoints")
+ os.makedirs(output_dir, exist_ok=True)
+ if self.project_configuration.automatic_checkpoint_naming:
+ folders = [os.path.join(output_dir, folder) for folder in os.listdir(output_dir)]
+ if (
+ self.project_configuration.total_limit is not None
+ and (len(folders) + 1 > self.project_configuration.total_limit)
+ and self.is_main_process
+ ):
+
+ def _inner(folder):
+ return list(map(int, re.findall(r"[\/]?([0-9]+)(?=[^\/]*$)", folder)))[0]
+
+ folders.sort(key=_inner)
+ logger.warning(
+ f"Deleting {len(folders) + 1 - self.project_configuration.total_limit} checkpoints to make room for new checkpoint."
+ )
+ for folder in folders[: len(folders) + 1 - self.project_configuration.total_limit]:
+ shutil.rmtree(folder)
+ output_dir = os.path.join(output_dir, f"checkpoint_{self.save_iteration}")
+ if os.path.exists(output_dir):
+ raise ValueError(
+ f"Checkpoint directory {output_dir} ({self.save_iteration}) already exists. Please manually override `self.save_iteration` with what iteration to start with."
+ )
+ self.wait_for_everyone()
+ os.makedirs(output_dir, exist_ok=True)
+ logger.info(f"Saving current state to {output_dir}")
+
+ if self.distributed_type == DistributedType.XLA:
+ # Finish running the previous step before checkpointing
+ xm.mark_step()
+
+ # Save the models taking care of FSDP and DeepSpeed nuances
+ weights = []
+ for i, model in enumerate(self._models):
+ if self.distributed_type == DistributedType.FSDP:
+ logger.info("Saving FSDP model")
+ save_fsdp_model(self.state.fsdp_plugin, self, model, output_dir, i)
+ logger.info(f"FSDP Model saved to output dir {output_dir}")
+ elif self.distributed_type == DistributedType.DEEPSPEED:
+ logger.info("Saving DeepSpeed Model and Optimizer")
+ ckpt_id = f"{MODEL_NAME}" if i == 0 else f"{MODEL_NAME}_{i}"
+ model.save_checkpoint(output_dir, ckpt_id, **save_model_func_kwargs)
+ logger.info(f"DeepSpeed Model and Optimizer saved to output dir {os.path.join(output_dir, ckpt_id)}")
+ elif self.distributed_type == DistributedType.MEGATRON_LM:
+ logger.info("Saving Megatron-LM Model, Optimizer and Scheduler")
+ model.save_checkpoint(output_dir)
+ logger.info(f"Megatron-LM Model , Optimizer and Scheduler saved to output dir {output_dir}")
+ else:
+ weights.append(self.get_state_dict(model, unwrap=False))
+
+ # Save the optimizers taking care of FSDP and DeepSpeed nuances
+ optimizers = []
+ if self.distributed_type == DistributedType.FSDP:
+ for i, opt in enumerate(self._optimizers):
+ logger.info("Saving FSDP Optimizer")
+ save_fsdp_optimizer(self.state.fsdp_plugin, self, opt, self._models[i], output_dir, i)
+ logger.info(f"FSDP Optimizer saved to output dir {output_dir}")
+ elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:
+ optimizers = self._optimizers
+
+ # Save the lr schedulers taking care of DeepSpeed nuances
+ schedulers = []
+ if self.distributed_type == DistributedType.DEEPSPEED:
+ for i, scheduler in enumerate(self._schedulers):
+ if isinstance(scheduler, DeepSpeedSchedulerWrapper):
+ continue
+ schedulers.append(scheduler)
+ elif self.distributed_type not in [DistributedType.MEGATRON_LM]:
+ schedulers = self._schedulers
+
+ # Save the samplers of the dataloaders
+ dataloaders = self._dataloaders
+
+ # Call model loading hooks that might have been registered with
+ # accelerator.register_model_state_hook
+ for hook in self._save_model_state_pre_hook.values():
+ hook(self._models, weights, output_dir)
+
+ save_location = save_accelerator_state(
+ output_dir,
+ weights,
+ optimizers,
+ schedulers,
+ dataloaders,
+ self.state.process_index,
+ self.scaler,
+ save_on_each_node=self.project_configuration.save_on_each_node,
+ safe_serialization=safe_serialization,
+ )
+ for i, obj in enumerate(self._custom_objects):
+ save_custom_state(obj, output_dir, i, save_on_each_node=self.project_configuration.save_on_each_node)
+ self.project_configuration.iteration += 1
+ return save_location
+
+ def register_load_state_pre_hook(self, hook: Callable[..., None]) -> hooks.RemovableHandle:
+ """
+ Registers a pre hook to be run before [`load_checkpoint`] is called in [`Accelerator.load_state`].
+
+ Args:
+ hook (`Callable`):
+ A function to be called in [`Accelerator.load_state`] before `load_checkpoint`.
+
+ The hook should have the following signature:
+
+ `hook(models: list[torch.nn.Module], input_dir: str) -> None`
+
+ The `models` argument is the list of models saved in the accelerator state under `accelerator._models`, and the
+ `input_dir` argument is the `input_dir` argument passed to [`Accelerator.load_state`].
+
+
+
+ Should only be used in conjunction with [`Accelerator.register_save_state_pre_hook`]. Can be useful to load
+ configurations in addition to model weights. Can also be used to overwrite model loading with a customized
+ method. In this case, make sure to remove already loaded models from the models list.
+
+
+
+ Returns:
+ `torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling
+ `handle.remove()`
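+
+ Example (a minimal sketch; `load_extra_config` is a hypothetical hook written for illustration, not part of
+ Accelerate):
+
+ ```python
+ >>> import json, os
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+
+ >>> def load_extra_config(models, input_dir):
+ ...     # Hypothetical hook: read back metadata written by a matching save-state hook.
+ ...     with open(os.path.join(input_dir, "extra_config.json")) as f:
+ ...         extra = json.load(f)
+
+ >>> handle = accelerator.register_load_state_pre_hook(load_extra_config)
+ >>> # ... call accelerator.load_state(...) as usual ...
+ >>> handle.remove()  # detach the hook when it is no longer needed
+ ```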
+ """
+ handle = hooks.RemovableHandle(self._load_model_state_pre_hook)
+ self._load_model_state_pre_hook[handle.id] = hook
+ return handle
+
+ def load_state(self, input_dir: str = None, **load_model_func_kwargs):
+ """
+ Loads the current states of the model, optimizer, scaler, RNG generators, and registered objects.
+
+
+
+ Should only be used in conjunction with [`Accelerator.save_state`]. If a file is not registered for
+ checkpointing, it will not be loaded if stored in the directory.
+
+
+
+ Args:
+ input_dir (`str` or `os.PathLike`):
+ The name of the folder all relevant weights and states were saved in. Can be `None` if
+ `automatic_checkpoint_naming` is used, and will pick up from the latest checkpoint.
+ load_model_func_kwargs (`dict`, *optional*):
+ Additional keyword arguments for loading model which can be passed to the underlying load function,
+ such as optional arguments for DeepSpeed's `load_checkpoint` function or a `map_location` to load the
+ model and optimizer on.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> model, optimizer, lr_scheduler = ...
+ >>> model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
+ >>> accelerator.load_state("my_checkpoint")
+ ```
+ """
+ if input_dir is not None:
+ # Check if folder exists
+ input_dir = os.path.expanduser(input_dir)
+ if not os.path.isdir(input_dir):
+ raise ValueError(f"Tried to find {input_dir} but folder does not exist")
+ elif self.project_configuration.automatic_checkpoint_naming:
+ # Pick up from automatic checkpoint naming
+ input_dir = os.path.join(self.project_dir, "checkpoints")
+ folders = [os.path.join(input_dir, folder) for folder in os.listdir(input_dir)]
+
+ def _inner(folder):
+ return list(map(int, re.findall(r"[\/]?([0-9]+)(?=[^\/]*$)", folder)))[0]
+
+ folders.sort(key=_inner)
+ input_dir = folders[-1]
+ else:
+ raise ValueError("No input_dir provided and automatic checkpoint naming is disabled.")
+ logger.info(f"Loading states from {input_dir}")
+
+ # Load the models taking care of FSDP and DeepSpeed nuances
+ models = []
+ for i, model in enumerate(self._models):
+ if self.distributed_type == DistributedType.FSDP:
+ logger.info("Loading FSDP model")
+ load_fsdp_model(self.state.fsdp_plugin, self, model, input_dir, i)
+ logger.info(f"FSDP Model loaded from input dir {input_dir}")
+ elif self.distributed_type == DistributedType.DEEPSPEED:
+ logger.info("Loading DeepSpeed Model and Optimizer")
+ ckpt_id = f"{MODEL_NAME}" if i == 0 else f"{MODEL_NAME}_{i}"
+ model.load_checkpoint(input_dir, ckpt_id, **load_model_func_kwargs)
+ logger.info(f"DeepSpeed Model and Optimizer loaded from input dir {os.path.join(input_dir, ckpt_id)}")
+ elif self.distributed_type == DistributedType.MEGATRON_LM:
+ logger.info("Loading Megatron-LM Model, Optimizer and Scheduler")
+ model.load_checkpoint(input_dir)
+ logger.info(f"Megatron-LM Model , Optimizer and Scheduler loaded from input dir {input_dir}")
+ else:
+ models.append(model)
+
+ # Load the optimizers taking care of FSDP and DeepSpeed nuances
+ optimizers = []
+ if self.distributed_type == DistributedType.FSDP:
+ for i, opt in enumerate(self._optimizers):
+ logger.info("Loading FSDP Optimizer")
+ load_fsdp_optimizer(self.state.fsdp_plugin, self, opt, self._models[i], input_dir, i)
+ logger.info(f"FSDP Optimizer loaded from input dir {input_dir}")
+ elif self.distributed_type not in [DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:
+ optimizers = self._optimizers
+
+ # Load the lr schedulers taking care of DeepSpeed nuances
+ schedulers = []
+ if self.distributed_type == DistributedType.DEEPSPEED:
+ for i, scheduler in enumerate(self._schedulers):
+ if isinstance(scheduler, DeepSpeedSchedulerWrapper):
+ continue
+ schedulers.append(scheduler)
+ elif self.distributed_type not in [DistributedType.MEGATRON_LM]:
+ schedulers = self._schedulers
+
+ dataloaders = self._dataloaders
+
+ # Call model loading hooks that might have been registered with
+ # accelerator.register_model_state_hook
+ for hook in self._load_model_state_pre_hook.values():
+ hook(models, input_dir)
+
+ map_location = load_model_func_kwargs.pop("map_location", None)
+ if map_location is None:
+ if self.num_processes > 1 and self.distributed_type in (
+ DistributedType.MULTI_GPU,
+ DistributedType.MULTI_MLU,
+ DistributedType.MULTI_NPU,
+ ):
+ map_location = "on_device"
+ else:
+ map_location = "cpu"
+
+ load_accelerator_state(
+ input_dir,
+ models,
+ optimizers,
+ schedulers,
+ dataloaders,
+ self.state.process_index,
+ self.scaler,
+ map_location,
+ **load_model_func_kwargs,
+ )
+ custom_checkpoints = [
+ f for f in os.listdir(input_dir) if re.search(r"^custom_checkpoint_\d+\.pkl$", f) is not None
+ ]
+ if len(custom_checkpoints) != len(self._custom_objects):
+ err = "Number of custom checkpoints in folder {input_dir} does not match the number of registered objects:"
+ err += f"\n\tFound checkpoints: {len(custom_checkpoints)}"
+ err += f"\n\tRegistered objects: {len(self._custom_objects)}\n"
+ err += "Please make sure to only load checkpoints from folders that were created with the same set of registered objects,"
+ err += "or avoid using `custom_checkpoint` in the filename for files in that same directory and load them in manually."
+ raise RuntimeError(err)
+ else:
+ logger.info(f"Loading in {len(custom_checkpoints)} custom states")
+ for index, obj in enumerate(self._custom_objects):
+ load_custom_state(obj, input_dir, index)
+
+ def free_memory(self):
+ """
+ Will release all references to the internal objects stored and call the garbage collector. You should call this
+ method between two trainings with different models/optimizers. Also will reset `Accelerator.step` to 0.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> model, optimizer, scheduler = ...
+ >>> model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
+ >>> accelerator.free_memory()
+ >>> del model, optimizer, scheduler
+ ```
+ """
+ self._schedulers = []
+ self._optimizers = []
+ self._models = []
+ self._dataloaders = []
+ self.deepspeed_engine_wrapped = None
+ self.step = 0
+ release_memory()
+
+ def clear(self):
+ """
+ Alias for [`Accelerator.free_memory`]; releases all references to the internal objects stored and calls the
+ garbage collector. You should call this method between two trainings with different models/optimizers.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> model, optimizer, scheduler = ...
+ >>> model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
+ >>> accelerator.clear()
+ >>> del model, optimizer, scheduler
+ ```
+ """
+ self.free_memory()
+
+ def _get_named_parameters(self, *args):
+ named_parameters = {}
+ for obj in args:
+ if isinstance(obj, torch.nn.Module):
+ obj = extract_model_from_parallel(obj)
+ named_parameters.update({n: p for n, p in obj.named_parameters()})
+ return named_parameters
+
+ def _get_devices(self, *args):
+ model_device = None
+ optimizer_device = None
+ for obj in args:
+ # Loop through the model parameters and stop at the first one to get its device.
+ if isinstance(obj, torch.nn.Module):
+ for param in obj.parameters():
+ model_device = param.device
+ break
+ # Loop through the optimizer parameter groups and stop at the first one with parameters to get its device.
+ if isinstance(obj, torch.optim.Optimizer):
+ for param_group in obj.param_groups:
+ if len(param_group["params"]) > 0:
+ optimizer_device = param_group["params"][0].device
+ break
+ return (model_device, optimizer_device)
+
+ def get_state_dict(self, model, unwrap=True):
+ """
+ Returns the state dictionary of a model sent through [`Accelerator.prepare`] potentially without full
+ precision.
+
+ Args:
+ model (`torch.nn.Module`):
+ A PyTorch model sent through [`Accelerator.prepare`]
+ unwrap (`bool`, *optional*, defaults to `True`):
+ Whether to return the original underlying state_dict of `model` or to return the wrapped state_dict
+
+ Returns:
+ `dict`: The state dictionary of the model potentially without full precision.
+
+ Example:
+
+ ```python
+ >>> import torch
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> net = torch.nn.Linear(2, 2)
+ >>> net = accelerator.prepare(net)
+ >>> state_dict = accelerator.get_state_dict(net)
+ ```
+ """
+
+ if self.distributed_type == DistributedType.DEEPSPEED:
+ if self.deepspeed_config["zero_optimization"]["stage"] == 3:
+ if model.zero_gather_16bit_weights_on_model_save():
+ state_dict = model._zero3_consolidated_16bit_state_dict()
+ else:
+ raise ValueError(
+ "Cannot get 16bit model weights because `stage3_gather_16bit_weights_on_model_save` in DeepSpeed config is False. "
+ "To save the model weights in 16bit, set `stage3_gather_16bit_weights_on_model_save` to True in DeepSpeed config file or "
+ "set `zero3_save_16bit_model` to True when using `accelerate config`. "
+ "To save the full checkpoint, run `model.save_checkpoint(save_dir)` and use `zero_to_fp32.py` to recover weights."
+ )
+ else:
+ from deepspeed.checkpoint.utils import clone_tensors_for_torch_save
+
+ state_dict = clone_tensors_for_torch_save(self.unwrap_model(model).state_dict())
+ elif self.distributed_type == DistributedType.FSDP:
+ from torch.distributed.fsdp import FullStateDictConfig, StateDictType
+ from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
+
+ full_state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
+ with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, full_state_dict_config):
+ state_dict = model.state_dict()
+ else:
+ if unwrap:
+ model = self.unwrap_model(model)
+ state_dict = model.state_dict()
+
+ return state_dict
+
+ def register_for_checkpointing(self, *objects):
+ """
+ Makes note of `objects` and will save or load them in during `save_state` or `load_state`.
+
+ These should be utilized when the state is being loaded or saved in the same script. It is not designed to be
+ used in different scripts.
+
+
+
+ Every `object` must have a `load_state_dict` and `state_dict` function to be stored.
+
+
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> # Assume `CustomObject` has a `state_dict` and `load_state_dict` function.
+ >>> obj = CustomObject()
+ >>> accelerator.register_for_checkpointing(obj)
+ >>> accelerator.save_state("checkpoint.pt")
+ ```
+ """
+ invalid_objects = []
+ for obj in objects:
+ if not hasattr(obj, "state_dict") or not hasattr(obj, "load_state_dict"):
+ invalid_objects.append(obj)
+ if len(invalid_objects) > 0:
+ err = "All `objects` must include a `state_dict` and `load_state_dict` function to be stored. The following inputs are invalid:"
+ for index, obj in enumerate(invalid_objects):
+ err += f"\n\t- Item at index {index}, `{get_pretty_name(obj)}`"
+ raise ValueError(err)
+ self._custom_objects.extend(objects)
+
+ @contextmanager
+ def autocast(self, cache_enabled: bool = False, autocast_handler: AutocastKwargs = None):
+ """
+ Applies automatic mixed precision inside the block wrapped by this context manager, if it is enabled. Nothing
+ different happens otherwise.
+
+ A different `autocast_handler` can be passed in to override the one set in the `Accelerator` object. This is
+ useful in blocks under `autocast` where you want to revert to fp32.
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator(mixed_precision="fp16")
+ >>> with accelerator.autocast():
+ ... train()
+ ```
+ """
+ if cache_enabled:
+ warnings.warn(
+ "Passing `cache_enabled=True` to `accelerator.autocast` is deprecated and will be removed in v0.23.0. "
+ "Please use the `AutocastKwargs` class instead and pass it to the `Accelerator` as a `kwarg_handler`.",
+ FutureWarning,
+ )
+ if self.autocast_handler is not None:
+ self.autocast_handler.cache_enabled = True
+ else:
+ self.autocast_handler = AutocastKwargs(cache_enabled=True)
+ if autocast_handler is None:
+ autocast_handler = self.autocast_handler
+ autocast_context = get_mixed_precision_context_manager(self.native_amp, autocast_handler)
+ autocast_context.__enter__()
+ # TODO: should the `yield` be in a try/finally block?
+ yield
+ autocast_context.__exit__(*sys.exc_info())
+
+ @property
+ def optimizer_step_was_skipped(self):
+ """
+ Whether or not the optimizer update was skipped (because of gradient overflow in mixed precision), in which
+ case the learning rate should not be changed.
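+
+ Example (a minimal sketch of how this flag is typically checked in a training loop; assumes `model`, `optimizer`
+ and `lr_scheduler` were prepared beforehand and mixed precision is enabled):
+
+ ```python
+ >>> accelerator.backward(loss)
+ >>> optimizer.step()
+ >>> if not accelerator.optimizer_step_was_skipped:
+ ...     lr_scheduler.step()
+ ```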
+ """
+ for optimizer in self._optimizers:
+ if optimizer.step_was_skipped:
+ return True
+ return False
+
+ def skip_first_batches(self, dataloader, num_batches: int = 0):
+ """
+ Creates a new `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`.
+
+ Args:
+ dataloader (`torch.utils.data.DataLoader`): The data loader in which to skip batches.
+ num_batches (`int`, *optional*, defaults to 0): The number of batches to skip
+
+ Example:
+
+ ```python
+ >>> from accelerate import Accelerator
+
+ >>> accelerator = Accelerator()
+ >>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)
+ >>> skipped_dataloader = accelerator.skip_first_batches(dataloader, num_batches=2)
+ >>> # for the first epoch only
+ >>> for input, target in skipped_dataloader:
+ ... optimizer.zero_grad()
+ ... output = model(input)
+ ... loss = loss_func(output, target)
+ ... accelerator.backward(loss)
+ ... optimizer.step()
+
+ >>> # subsequent epochs
+ >>> for input, target in dataloader:
+ ... optimizer.zero_grad()
+ ... ...
+ ```
+ """
+ return skip_first_batches(dataloader, num_batches=num_batches)
+
+ def __deepcopy__(self, memo):
+ logger.info("Deep copying the `Accelerator` object, note that this will point to the same original object.")
+ return self
+
+ def verify_device_map(self, model: torch.nn.Module) -> bool:
+ """
+ Checks whether `model` has been prepared for big model inference with a device map resembling `auto`
+ (i.e. it carries an `hf_device_map` spanning more than one device) and returns `True` if so.
+ """
+ # Checks if any of the child modules has the attribute `hf_device_map` and this map has more than one entry.
+ for m in model.modules():
+ if hasattr(m, "hf_device_map") and len(m.hf_device_map) > 1:
+ return True
+
+ return False
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/big_modeling.py b/env-llmeval/lib/python3.10/site-packages/accelerate/big_modeling.py
new file mode 100644
index 0000000000000000000000000000000000000000..95c4ff94f1ba959ed83b826511ff1602dc115f05
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/big_modeling.py
@@ -0,0 +1,622 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+from contextlib import contextmanager
+from functools import wraps
+from typing import Dict, List, Optional, Union
+
+import torch
+import torch.nn as nn
+
+from .hooks import (
+ AlignDevicesHook,
+ CpuOffload,
+ UserCpuOffloadHook,
+ add_hook_to_module,
+ attach_align_device_hook,
+ attach_align_device_hook_on_blocks,
+)
+from .utils import (
+ OffloadedWeightsLoader,
+ check_cuda_p2p_ib_support,
+ check_device_map,
+ extract_submodules_state_dict,
+ find_tied_parameters,
+ get_balanced_memory,
+ infer_auto_device_map,
+ is_mlu_available,
+ is_npu_available,
+ is_torch_version,
+ is_xpu_available,
+ load_checkpoint_in_model,
+ offload_state_dict,
+ parse_flag_from_env,
+ retie_parameters,
+)
+from .utils.other import recursive_getattr
+
+
+logger = logging.getLogger(__name__)
+
+
+@contextmanager
+def init_empty_weights(include_buffers: bool = None):
+ """
+ A context manager under which models are initialized with all parameters on the meta device, therefore creating an
+ empty model. Useful when just initializing the model would blow the available RAM.
+
+ Args:
+ include_buffers (`bool`, *optional*):
+ Whether or not to also put all buffers on the meta device while initializing.
+
+ Example:
+
+ ```python
+ import torch.nn as nn
+ from accelerate import init_empty_weights
+
+ # Initialize a model with 100 billion parameters in no time and without using any RAM.
+ with init_empty_weights():
+ tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])
+ ```
+
+
+
+ Any model created under this context manager has no weights. As such you can't do something like
+ `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].
+ Make sure to overwrite the default device_map param for [`load_checkpoint_and_dispatch`], otherwise dispatch is not
+ called.
+
+
+ """
+ if include_buffers is None:
+ include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)
+ with init_on_device(torch.device("meta"), include_buffers=include_buffers) as f:
+ yield f
+
+
+@contextmanager
+def init_on_device(device: torch.device, include_buffers: bool = None):
+ """
+ A context manager under which models are initialized with all parameters on the specified device.
+
+ Args:
+ device (`torch.device`):
+ Device to initialize all parameters on.
+ include_buffers (`bool`, *optional*):
+ Whether or not to also put all buffers on the specified device while initializing.
+
+ Example:
+
+ ```python
+ import torch.nn as nn
+ from accelerate import init_on_device
+
+ with init_on_device(device=torch.device("cuda")):
+ tst = nn.Linear(100, 100)  # on `cuda` device
+ ```
+ """
+ if include_buffers is None:
+ include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)
+
+ # TODO(shingjan): remove the torch version check once older versions are deprecated
+ if is_torch_version(">=", "2.0") and include_buffers:
+ with device:
+ yield
+ return
+
+ old_register_parameter = nn.Module.register_parameter
+ if include_buffers:
+ old_register_buffer = nn.Module.register_buffer
+
+ def register_empty_parameter(module, name, param):
+ old_register_parameter(module, name, param)
+ if param is not None:
+ param_cls = type(module._parameters[name])
+ kwargs = module._parameters[name].__dict__
+ kwargs["requires_grad"] = param.requires_grad
+ module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)
+
+ def register_empty_buffer(module, name, buffer, persistent=True):
+ old_register_buffer(module, name, buffer, persistent=persistent)
+ if buffer is not None:
+ module._buffers[name] = module._buffers[name].to(device)
+
+ # Patch tensor creation
+ if include_buffers:
+ tensor_constructors_to_patch = {
+ torch_function_name: getattr(torch, torch_function_name)
+ for torch_function_name in ["empty", "zeros", "ones", "full"]
+ }
+ else:
+ tensor_constructors_to_patch = {}
+
+ def patch_tensor_constructor(fn):
+ def wrapper(*args, **kwargs):
+ kwargs["device"] = device
+ return fn(*args, **kwargs)
+
+ return wrapper
+
+ try:
+ nn.Module.register_parameter = register_empty_parameter
+ if include_buffers:
+ nn.Module.register_buffer = register_empty_buffer
+ for torch_function_name in tensor_constructors_to_patch.keys():
+ setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name)))
+ yield
+ finally:
+ nn.Module.register_parameter = old_register_parameter
+ if include_buffers:
+ nn.Module.register_buffer = old_register_buffer
+ for torch_function_name, old_torch_function in tensor_constructors_to_patch.items():
+ setattr(torch, torch_function_name, old_torch_function)
+
+
+def cpu_offload(
+ model: nn.Module,
+ execution_device: Optional[torch.device] = None,
+ offload_buffers: bool = False,
+ state_dict: Optional[Dict[str, torch.Tensor]] = None,
+ preload_module_classes: Optional[List[str]] = None,
+):
+ """
+ Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one
+ copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that
+ state dict and put on the execution device passed as they are needed, then offloaded again.
+
+ Args:
+ model (`torch.nn.Module`):
+ The model to offload.
+ execution_device (`torch.device`, *optional*):
+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the
+ model first parameter device.
+ offload_buffers (`bool`, *optional*, defaults to `False`):
+ Whether or not to offload the buffers with the model parameters.
+ state_dict (`Dict[str, torch.Tensor]`, *optional*):
+ The state dict of the model that will be kept on CPU.
+ preload_module_classes (`List[str]`, *optional*):
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
+ of the forward. This should only be used for classes that have submodules which are registered but not
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
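+
+ Example (a minimal sketch, assuming a CUDA device `cuda:0` is available; the layer and tensor shapes are
+ arbitrary):
+
+ ```python
+ import torch
+ import torch.nn as nn
+ from accelerate import cpu_offload
+
+ model = nn.Linear(4, 4)
+ model = cpu_offload(model, execution_device=torch.device("cuda:0"))
+ # Weights stay on the CPU and are moved to the GPU only for the duration of the forward pass.
+ out = model(torch.randn(2, 4, device="cuda:0"))
+ ```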
+ """
+ if execution_device is None:
+ execution_device = next(iter(model.parameters())).device
+ if state_dict is None:
+ state_dict = {n: p.to("cpu") for n, p in model.state_dict().items()}
+
+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)
+ attach_align_device_hook(
+ model,
+ execution_device=execution_device,
+ offload=True,
+ offload_buffers=offload_buffers,
+ weights_map=state_dict,
+ preload_module_classes=preload_module_classes,
+ )
+
+ return model
+
+
+def cpu_offload_with_hook(
+ model: torch.nn.Module,
+ execution_device: Optional[Union[int, str, torch.device]] = None,
+ prev_module_hook: Optional[UserCpuOffloadHook] = None,
+):
+ """
+ Offloads a model on the CPU and puts it back to an execution device when executed. The difference with
+ [`cpu_offload`] is that the model stays on the execution device after the forward and is only offloaded again when
+ the `offload` method of the returned `hook` is called. Useful for pipelines running a model in a loop.
+
+ Args:
+ model (`torch.nn.Module`):
+ The model to offload.
+ execution_device(`str`, `int` or `torch.device`, *optional*):
+ The device on which the model should be executed. Will default to the MPS device if it's available, then
+ GPU 0 if there is a GPU, and finally to the CPU.
+ prev_module_hook (`UserCpuOffloadHook`, *optional*):
+ The hook sent back by this function for a previous model in the pipeline you are running. If passed, its
+ offload method will be called just before the forward of the model to which this hook is attached.
+
+ Example:
+
+ ```py
+ model_1, hook_1 = cpu_offload_with_hook(model_1, cuda_device)
+ model_2, hook_2 = cpu_offload_with_hook(model_2, cuda_device, prev_module_hook=hook_1)
+ model_3, hook_3 = cpu_offload_with_hook(model_3, cuda_device, prev_module_hook=hook_2)
+
+ hid_1 = model_1(input)
+ for i in range(50):
+ # model1 is offloaded on the CPU at the first iteration, model 2 stays on the GPU for this whole loop.
+ hid_2 = model_2(hid_1)
+ # model2 is offloaded to the CPU just before this forward.
+ hid_3 = model_3(hid_2)
+
+ # For model3, you need to manually call the hook offload method.
+ hook_3.offload()
+ ```
+ """
+ hook = CpuOffload(execution_device=execution_device, prev_module_hook=prev_module_hook)
+ add_hook_to_module(model, hook, append=True)
+ user_hook = UserCpuOffloadHook(model, hook)
+ return model, user_hook
+
+
+def disk_offload(
+ model: nn.Module,
+ offload_dir: Union[str, os.PathLike],
+ execution_device: Optional[torch.device] = None,
+ offload_buffers: bool = False,
+ preload_module_classes: Optional[List[str]] = None,
+):
+ """
+ Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as
+ memory-mapped array in a given folder. During the forward pass, parameters will be accessed from that folder and
+ put on the execution device passed as they are needed, then offloaded again.
+
+ Args:
+ model (`torch.nn.Module`): The model to offload.
+ offload_dir (`str` or `os.PathLike`):
+ The folder in which to offload the model weights (or where the model weights are already offloaded).
+ execution_device (`torch.device`, *optional*):
+ The device on which the forward pass of the model will be executed (should be a GPU). Will default to the
+ model's first parameter device.
+ offload_buffers (`bool`, *optional*, defaults to `False`):
+ Whether or not to offload the buffers with the model parameters.
+ preload_module_classes (`List[str]`, *optional*):
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
+ of the forward. This should only be used for classes that have submodules which are registered but not
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
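+
+ Example (a minimal sketch; `"offload_dir"` is an arbitrary folder name and a CUDA device `cuda:0` is assumed):
+
+ ```python
+ import torch
+ import torch.nn as nn
+ from accelerate import disk_offload
+
+ model = nn.Linear(4, 4)
+ model = disk_offload(model, offload_dir="offload_dir", execution_device=torch.device("cuda:0"))
+ # Weights are memory-mapped from disk and loaded onto the GPU only for the forward pass.
+ out = model(torch.randn(2, 4, device="cuda:0"))
+ ```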
+ """
+ if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json")):
+ offload_state_dict(offload_dir, model.state_dict())
+ if execution_device is None:
+ execution_device = next(iter(model.parameters())).device
+ weights_map = OffloadedWeightsLoader(save_folder=offload_dir)
+
+ add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)
+ attach_align_device_hook(
+ model,
+ execution_device=execution_device,
+ offload=True,
+ offload_buffers=offload_buffers,
+ weights_map=weights_map,
+ preload_module_classes=preload_module_classes,
+ )
+
+ return model
+
+
+def dispatch_model(
+ model: nn.Module,
+ device_map: Dict[str, Union[str, int, torch.device]],
+ main_device: Optional[torch.device] = None,
+ state_dict: Optional[Dict[str, torch.Tensor]] = None,
+ offload_dir: Optional[Union[str, os.PathLike]] = None,
+ offload_index: Optional[Dict[str, str]] = None,
+ offload_buffers: bool = False,
+ skip_keys: Optional[Union[str, List[str]]] = None,
+ preload_module_classes: Optional[List[str]] = None,
+ force_hooks: bool = False,
+):
+ """
+ Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on
+ the CPU or even the disk.
+
+ Args:
+ model (`torch.nn.Module`):
+ The model to dispatch.
+ device_map (`Dict[str, Union[str, int, torch.device]]`):
+ A dictionary mapping module names in the model's `state_dict` to the device they should go to. Note that
+ `"disk"` is accepted even if it's not a proper value for `torch.device`.
+ main_device (`str`, `int` or `torch.device`, *optional*):
+ The main execution device. Will default to the first device in the `device_map` different from `"cpu"` or
+ `"disk"`.
+ state_dict (`Dict[str, torch.Tensor]`, *optional*):
+ The state dict of the part of the model that will be kept on CPU.
+ offload_dir (`str` or `os.PathLike`):
+ The folder in which to offload the model weights (or where the model weights are already offloaded).
+ offload_index (`Dict`, *optional*):
+ A dictionary from weight name to their information (`dtype`/ `shape` or safetensors filename). Will default
+ to the index saved in `save_folder`.
+ offload_buffers (`bool`, *optional*, defaults to `False`):
+ Whether or not to offload the buffers with the model parameters.
+ skip_keys (`str` or `List[str]`, *optional*):
+ A list of keys to ignore when moving inputs or outputs between devices.
+ preload_module_classes (`List[str]`, *optional*):
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
+ of the forward. This should only be used for classes that have submodules which are registered but not
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
+ force_hooks (`bool`, *optional*, defaults to `False`):
+ Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
+ single device.
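+
+ Example (a minimal sketch with a hand-written `device_map`, assuming GPU 0 is available; the module sizes are
+ arbitrary):
+
+ ```python
+ import torch.nn as nn
+ from accelerate import dispatch_model
+
+ model = nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 8))
+ # Submodules of an `nn.Sequential` are named "0", "1", ...; keep the first on GPU 0, offload the second to CPU.
+ device_map = {"0": 0, "1": "cpu"}
+ model = dispatch_model(model, device_map=device_map)
+ ```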
+ """
+ # Error early if the device map is incomplete.
+ check_device_map(model, device_map)
+
+ # for backward compatibility
+ is_bnb_quantized = (
+ getattr(model, "is_quantized", False) or getattr(model, "is_loaded_in_8bit", False)
+ ) and getattr(model, "quantization_method", "bitsandbytes") == "bitsandbytes"
+
+ # We attach hooks if the device_map has at least 2 different devices or if
+ # force_hooks is set to `True`. Otherwise, the model is already loaded
+ # on its single device and the user can decide where to dispatch the model.
+ # If the model is quantized, we always force-dispatch the model
+ if (len(set(device_map.values())) > 1) or is_bnb_quantized or force_hooks:
+ if main_device is None:
+ if set(device_map.values()) == {"cpu"} or set(device_map.values()) == {"cpu", "disk"}:
+ main_device = "cpu"
+ else:
+ main_device = [d for d in device_map.values() if d not in ["cpu", "disk"]][0]
+
+ if main_device != "cpu":
+ cpu_modules = [name for name, device in device_map.items() if device == "cpu"]
+ if state_dict is None and len(cpu_modules) > 0:
+ state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules)
+
+ disk_modules = [name for name, device in device_map.items() if device == "disk"]
+ if offload_dir is None and offload_index is None and len(disk_modules) > 0:
+ raise ValueError(
+ "We need an `offload_dir` to dispatch this model according to this `device_map`, the following submodules "
+ f"need to be offloaded: {', '.join(disk_modules)}."
+ )
+ if (
+ len(disk_modules) > 0
+ and offload_index is None
+ and (not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json")))
+ ):
+ disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules)
+ offload_state_dict(offload_dir, disk_state_dict)
+
+ execution_device = {
+ name: main_device if device in ["cpu", "disk"] else device for name, device in device_map.items()
+ }
+ execution_device[""] = main_device
+ offloaded_devices = ["disk"] if main_device == "cpu" or main_device == "mps" else ["cpu", "disk"]
+ offload = {name: device in offloaded_devices for name, device in device_map.items()}
+ save_folder = offload_dir if len(disk_modules) > 0 else None
+ if state_dict is not None or save_folder is not None or offload_index is not None:
+ device = main_device if offload_index is not None else None
+ weights_map = OffloadedWeightsLoader(
+ state_dict=state_dict, save_folder=save_folder, index=offload_index, device=device
+ )
+ else:
+ weights_map = None
+
+ # When dispatching the model's parameters to the devices specified in device_map, we want to avoid allocating memory several times for the
+ # tied parameters. The dictionary tied_params_map keeps track of the already allocated data for a given tied parameter (represented by its
+ # original pointer) on each device.
+ tied_params = find_tied_parameters(model)
+
+ tied_params_map = {}
+ for group in tied_params:
+ for param_name in group:
+ # data_ptr() is enough here, as `find_tied_parameters` finds tied params simply by comparing `param1 is param2`, so we don't need
+ # to care about views of tensors through storage_offset.
+ data_ptr = recursive_getattr(model, param_name).data_ptr()
+ tied_params_map[data_ptr] = {}
+
+ # Note: To handle the disk offloading case, we can not simply use weights_map[param_name].data_ptr() as the reference pointer,
+ # as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer.
+
+ attach_align_device_hook_on_blocks(
+ model,
+ execution_device=execution_device,
+ offload=offload,
+ offload_buffers=offload_buffers,
+ weights_map=weights_map,
+ skip_keys=skip_keys,
+ preload_module_classes=preload_module_classes,
+ tied_params_map=tied_params_map,
+ )
+
+ # warn if any params are on the meta device
+ offloaded_devices_str = " and ".join(
+ [device for device in set(device_map.values()) if device in ("cpu", "disk")]
+ )
+ if len(offloaded_devices_str) > 0:
+ logging.warning(
+ f"Some parameters are on the meta device device because they were offloaded to the {offloaded_devices_str}."
+ )
+
+ # Attaching the hook may break tied weights, so we retie them
+ retie_parameters(model, tied_params)
+
+ # add a warning to `.to()` and the device-placement methods (e.g. `.cuda()`)
+ def add_warning(fn, model):
+ @wraps(fn)
+ def wrapper(*args, **kwargs):
+ warning_msg = "You shouldn't move a model that is dispatched using accelerate hooks."
+ if str(fn.__name__) == "to":
+ to_device = torch._C._nn._parse_to(*args, **kwargs)[0]
+ if to_device is not None:
+ logger.warning(warning_msg)
+ else:
+ logger.warning(warning_msg)
+ for param in model.parameters():
+ if param.device == torch.device("meta"):
+ raise RuntimeError("You can't move a model that has some modules offloaded to cpu or disk.")
+ return fn(*args, **kwargs)
+
+ return wrapper
+
+ model.to = add_warning(model.to, model)
+ if is_npu_available():
+ model.npu = add_warning(model.npu, model)
+ elif is_mlu_available():
+ model.mlu = add_warning(model.mlu, model)
+ elif is_xpu_available():
+ model.xpu = add_warning(model.xpu, model)
+ else:
+ model.cuda = add_warning(model.cuda, model)
+
+ # Check if we are using multi-gpus with RTX 4000 series
+ use_multi_gpu = len([device for device in set(device_map.values()) if device not in ("cpu", "disk")]) > 1
+ if use_multi_gpu and not check_cuda_p2p_ib_support():
+ logger.warning(
+ "We've detected an older driver with an RTX 4000 series GPU. These drivers have issues with P2P. "
+ "This can affect the multi-gpu inference when using accelerate device_map."
+ "Please make sure to update your driver to the latest version which resolves this."
+ )
+ else:
+ device = list(device_map.values())[0]
+ # `torch.Tensor.to()` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)).
+ if is_npu_available() and isinstance(device, int):
+ device = f"npu:{device}"
+ elif is_mlu_available() and isinstance(device, int):
+ device = f"mlu:{device}"
+ elif is_xpu_available() and isinstance(device, int):
+ device = f"xpu:{device}"
+ if device != "disk":
+ model.to(device)
+ else:
+ raise ValueError(
+ "You are trying to offload the whole model to the disk. Please use the `disk_offload` function instead."
+ )
+ # Convert OrderedDict back to dict for easier usage
+ model.hf_device_map = dict(device_map)
+ return model
+
+
+def load_checkpoint_and_dispatch(
+ model: nn.Module,
+ checkpoint: Union[str, os.PathLike],
+ device_map: Optional[Union[str, Dict[str, Union[int, str, torch.device]]]] = None,
+ max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
+ no_split_module_classes: Optional[List[str]] = None,
+ offload_folder: Optional[Union[str, os.PathLike]] = None,
+ offload_buffers: bool = False,
+ dtype: Optional[Union[str, torch.dtype]] = None,
+ offload_state_dict: Optional[bool] = None,
+ skip_keys: Optional[Union[str, List[str]]] = None,
+ preload_module_classes: Optional[List[str]] = None,
+ force_hooks: bool = False,
+):
+ """
+ Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are
+ loaded and adds the various hooks that will make this model run properly (even if split across devices).
+
+ Args:
+ model (`torch.nn.Module`): The model in which we want to load a checkpoint.
+ checkpoint (`str` or `os.PathLike`):
+ The folder checkpoint to load. It can be:
+ - a path to a file containing a whole model state dict
+ - a path to a `.json` file containing the index to a sharded checkpoint
+ - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint.
+ device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):
+ A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer
+ name; once a given module name is included, every submodule of it will be sent to the same device.
+
+ To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more
+ information about each option see [here](../concept_guides/big_model_inference#designing-a-device-map).
+ Defaults to None, which means [`dispatch_model`] will not be called.
+ max_memory (`Dict`, *optional*):
+ A dictionary device identifier to maximum memory. Will default to the maximum memory available for each GPU
+ and the available CPU RAM if unset.
+ no_split_module_classes (`List[str]`, *optional*):
+ A list of layer class names that should never be split across device (for instance any layer that has a
+ residual connection).
+ offload_folder (`str` or `os.PathLike`, *optional*):
+ If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
+ offload_buffers (`bool`, *optional*, defaults to `False`):
+ In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as
+ well as the parameters.
+ dtype (`str` or `torch.dtype`, *optional*):
+ If provided, the weights will be converted to that type when loaded.
+ offload_state_dict (`bool`, *optional*):
+ If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if
+ the weight of the CPU state dict + the biggest shard does not fit. Will default to `True` if the device map
+ picked contains `"disk"` values.
+ skip_keys (`str` or `List[str]`, *optional*):
+ A list of keys to ignore when moving inputs or outputs between devices.
+ preload_module_classes (`List[str]`, *optional*):
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
+ of the forward. This should only be used for classes that have submodules which are registered but not
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
+ force_hooks (`bool`, *optional*, defaults to `False`):
+ Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
+ single device.
+
+ Example:
+
+ ```python
+ >>> from accelerate import init_empty_weights, load_checkpoint_and_dispatch
+ >>> from huggingface_hub import hf_hub_download
+ >>> from transformers import AutoConfig, AutoModelForCausalLM
+
+ >>> # Download the Weights
+ >>> checkpoint = "EleutherAI/gpt-j-6B"
+ >>> weights_location = hf_hub_download(checkpoint, "pytorch_model.bin")
+
+ >>> # Create a model and initialize it with empty weights
+ >>> config = AutoConfig.from_pretrained(checkpoint)
+ >>> with init_empty_weights():
+ ... model = AutoModelForCausalLM.from_config(config)
+
+ >>> # Load the checkpoint and dispatch it to the right devices
+ >>> model = load_checkpoint_and_dispatch(
+ ... model, weights_location, device_map="auto", no_split_module_classes=["GPTJBlock"]
+ ... )
+ ```
+ """
+ if isinstance(device_map, str) and device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
+ raise ValueError(
+ "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
+ "'sequential'."
+ )
+ if isinstance(device_map, str):
+ if device_map != "sequential":
+ max_memory = get_balanced_memory(
+ model,
+ max_memory=max_memory,
+ no_split_module_classes=no_split_module_classes,
+ dtype=dtype,
+ low_zero=(device_map == "balanced_low_0"),
+ )
+ device_map = infer_auto_device_map(
+ model,
+ max_memory=max_memory,
+ no_split_module_classes=no_split_module_classes,
+ dtype=dtype,
+ offload_buffers=offload_buffers,
+ )
+ if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
+ offload_state_dict = True
+ load_checkpoint_in_model(
+ model,
+ checkpoint,
+ device_map=device_map,
+ offload_folder=offload_folder,
+ dtype=dtype,
+ offload_state_dict=offload_state_dict,
+ offload_buffers=offload_buffers,
+ )
+ if device_map is None:
+ return model
+ return dispatch_model(
+ model,
+ device_map=device_map,
+ offload_dir=offload_folder,
+ offload_buffers=offload_buffers,
+ skip_keys=skip_keys,
+ preload_module_classes=preload_module_classes,
+ force_hooks=force_hooks,
+ )
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/commands/__init__.py b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9cbe26c257b515f657c05e1996d517e69613972
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea5a08abf51a83ca048524ea0b8758f9d52b7edc
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/accelerate_cli.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from accelerate.commands.config import get_config_parser
+from accelerate.commands.env import env_command_parser
+from accelerate.commands.estimate import estimate_command_parser
+from accelerate.commands.launch import launch_command_parser
+from accelerate.commands.test import test_command_parser
+from accelerate.commands.tpu import tpu_command_parser
+from accelerate.commands.utils import CustomArgumentParser
+
+
+def main():
+ parser = CustomArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
+ subparsers = parser.add_subparsers(help="accelerate command helpers")
+
+ # Register commands
+ get_config_parser(subparsers=subparsers)
+ estimate_command_parser(subparsers=subparsers)
+ env_command_parser(subparsers=subparsers)
+ launch_command_parser(subparsers=subparsers)
+ tpu_command_parser(subparsers=subparsers)
+ test_command_parser(subparsers=subparsers)
+
+ # Let's go
+ args = parser.parse_args()
+
+ if not hasattr(args, "func"):
+ parser.print_help()
+ exit(1)
+
+ # Run
+ args.func(args)
+
+
+if __name__ == "__main__":
+ main()
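
Each `*_command_parser` imported above follows the same contract: when given `subparsers`, it registers a named subparser and binds its handler via `set_defaults(func=...)`, which is what lets `main()` dispatch on `args.func`. A hypothetical sketch of that contract (the `hello` command below is illustrative and not part of accelerate):

```python
import argparse


def hello_command_parser(subparsers=None):
    # Register under the shared CLI when subparsers is provided, otherwise stand alone.
    if subparsers is not None:
        parser = subparsers.add_parser("hello")
    else:
        parser = argparse.ArgumentParser("Accelerate hello command")
    parser.add_argument("--name", default="world", help="Who to greet.")
    if subparsers is not None:
        parser.set_defaults(func=hello_command)  # main() later calls args.func(args)
    return parser


def hello_command(args):
    print(f"Hello, {args.name}!")
```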
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/__init__.py b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..649a15888cccd070b3d4ca9a600457c6ad59d4d3
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/__init__.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+
+from .config import config_command_parser
+from .config_args import default_config_file, load_config_from_file # noqa: F401
+from .default import default_command_parser
+from .update import update_command_parser
+
+
+def get_config_parser(subparsers=None):
+ parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
+ # The main config parser
+ config_parser = config_command_parser(subparsers)
+ # The subparser to add commands to
+ subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")
+
+ # Then add other parsers with the parent parser
+ default_command_parser(subcommands, parents=[parent_parser])
+ update_command_parser(subcommands, parents=[parent_parser])
+
+ return config_parser
+
+
+def main():
+ config_parser = get_config_parser()
+ args = config_parser.parse_args()
+
+ if not hasattr(args, "func"):
+ config_parser.print_help()
+ exit(1)
+
+ # Run
+ args.func(args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/cluster.py b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/cluster.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d8821b0660932a946dc238151b2c6599de625d1
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/cluster.py
@@ -0,0 +1,705 @@
+#!/usr/bin/env python
+
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+from ...utils import (
+ ComputeEnvironment,
+ DistributedType,
+ is_deepspeed_available,
+ is_mlu_available,
+ is_mps_available,
+ is_npu_available,
+ is_transformers_available,
+ is_xpu_available,
+)
+from ...utils.constants import (
+ DEEPSPEED_MULTINODE_LAUNCHERS,
+ FSDP_AUTO_WRAP_POLICY,
+ FSDP_BACKWARD_PREFETCH,
+ FSDP_SHARDING_STRATEGY,
+ FSDP_STATE_DICT_TYPE,
+ TORCH_DYNAMO_MODES,
+)
+from .config_args import ClusterConfig
+from .config_utils import (
+ DYNAMO_BACKENDS,
+ _ask_field,
+ _ask_options,
+ _convert_distributed_mode,
+ _convert_dynamo_backend,
+ _convert_mixed_precision,
+ _convert_yes_no_to_bool,
+)
+
+
+def get_cluster_input():
+ distributed_type = _ask_options(
+ "Which type of machine are you using?",
+ ["No distributed training", "multi-CPU", "multi-XPU", "multi-GPU", "multi-NPU", "multi-MLU", "TPU"],
+ _convert_distributed_mode,
+ )
+
+ machine_rank = 0
+ num_machines = 1
+ num_processes = 1
+ gpu_ids = None
+ main_process_ip = None
+ main_process_port = None
+ rdzv_backend = "static"
+ same_network = True
+ debug = False
+
+ if distributed_type in [
+ DistributedType.MULTI_GPU,
+ DistributedType.MULTI_MLU,
+ DistributedType.MULTI_NPU,
+ DistributedType.MULTI_XPU,
+ DistributedType.MULTI_CPU,
+ ]:
+ num_machines = _ask_field(
+ "How many different machines will you use (use more than 1 for multi-node training)? [1]: ",
+ int,
+ default=1,
+ )
+ if num_machines > 1:
+ machine_rank = _ask_options(
+ "What is the rank of this machine?",
+ list(range(num_machines)),
+ int,
+ )
+ main_process_ip = _ask_field(
+ "What is the IP address of the machine that will host the main process? ",
+ )
+ main_process_port = _ask_field(
+ "What is the port you will use to communicate with the main process? ",
+ int,
+ )
+ same_network = _ask_field(
+ "Are all the machines on the same local network? Answer `no` if nodes are on the cloud and/or on different network hosts [YES/no]: ",
+ _convert_yes_no_to_bool,
+ default=True,
+ error_message="Please enter yes or no.",
+ )
+ if not same_network:
+ rdzv_backend = _ask_field(
+ "What rendezvous backend will you use? ('static', 'c10d', ...): ", default="static"
+ )
+ debug = _ask_field(
+ "Should distributed operations be checked while running for errors? This can avoid timeout issues but will be slower. [yes/NO]: ",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+
+ if distributed_type == DistributedType.NO:
+ use_cpu = _ask_field(
+ "Do you want to run your training on CPU only (even if a GPU / Apple Silicon / Ascend NPU device is available)? [yes/NO]:",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+ elif distributed_type == DistributedType.MULTI_CPU:
+ use_cpu = True
+ else:
+ use_cpu = False
+
+ ipex_config = {}
+ mpirun_config = {}
+ if use_cpu:
+ ipex_config["ipex"] = _ask_field(
+ "Do you want to use Intel PyTorch Extension (IPEX) to speed up training on CPU? [yes/NO]:",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+ if distributed_type == DistributedType.MULTI_CPU:
+ use_mpirun = _ask_field(
+ "Do you want accelerate to launch mpirun? [yes/NO]: ",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+ if use_mpirun:
+ mpirun_hostfile = _ask_field(
+ "Please enter the path to the hostfile to use with mpirun [~/hostfile]: ",
+ str,
+ default="~/hostfile",
+ )
+ mpirun_config["mpirun_hostfile"] = os.path.expanduser(mpirun_hostfile.strip())
+ mpirun_config["mpirun_ccl"] = _ask_field("Enter the number of oneCCL worker threads [1]: ", default=1)
+ if (
+ not use_cpu
+ and is_xpu_available()
+ and distributed_type
+ not in [DistributedType.MULTI_GPU, DistributedType.MULTI_NPU, DistributedType.MULTI_MLU, DistributedType.XLA]
+ ):
+ ipex_config["use_xpu"] = _ask_field(
+ "Do you want to use XPU plugin to speed up training on XPU? [yes/NO]:",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+
+ dynamo_config = {}
+ use_dynamo = _ask_field(
+ "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+ if use_dynamo:
+ prefix = "dynamo_"
+ dynamo_config[prefix + "backend"] = _ask_options(
+ "Which dynamo backend would you like to use?",
+ [x.lower() for x in DYNAMO_BACKENDS],
+ _convert_dynamo_backend,
+ default=2,
+ )
+ use_custom_options = _ask_field(
+ "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+
+ if use_custom_options:
+ dynamo_config[prefix + "mode"] = _ask_options(
+ "Which mode do you want to use?",
+ TORCH_DYNAMO_MODES,
+ lambda x: TORCH_DYNAMO_MODES[int(x)],
+ default=0,
+ )
+ dynamo_config[prefix + "use_fullgraph"] = _ask_field(
+ "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+ dynamo_config[prefix + "use_dynamic"] = _ask_field(
+ "Do you want to enable dynamic shape tracing? [yes/NO]: ",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+
+ use_mps = not use_cpu and is_mps_available()
+ deepspeed_config = {}
+ if (
+ distributed_type
+ in [
+ DistributedType.MULTI_GPU,
+ DistributedType.MULTI_XPU,
+ DistributedType.MULTI_NPU,
+ DistributedType.MULTI_MLU,
+ DistributedType.NO,
+ ]
+ and not use_mps
+ ):
+ use_deepspeed = _ask_field(
+ "Do you want to use DeepSpeed? [yes/NO]: ",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+ if use_deepspeed:
+ distributed_type = DistributedType.DEEPSPEED
+ assert (
+ is_deepspeed_available()
+ ), "DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source"
+
+ if distributed_type == DistributedType.DEEPSPEED:
+ use_deepspeed_config = _ask_field(
+ "Do you want to specify a json file to a DeepSpeed config? [yes/NO]: ",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+ if use_deepspeed_config:
+ deepspeed_config["deepspeed_config_file"] = _ask_field(
+ "Please enter the path to the json DeepSpeed config file: ",
+ str,
+ default="none",
+ )
+ else:
+ deepspeed_config["zero_stage"] = _ask_options(
+ "What should be your DeepSpeed's ZeRO optimization stage?",
+ [0, 1, 2, 3],
+ int,
+ default=2,
+ )
+
+ deepspeed_devices = ["none", "cpu", "nvme"]
+ if deepspeed_config["zero_stage"] >= 2:
+ deepspeed_config["offload_optimizer_device"] = _ask_options(
+ "Where to offload optimizer states?", deepspeed_devices, lambda x: deepspeed_devices[int(x)]
+ )
+ deepspeed_config["offload_param_device"] = _ask_options(
+ "Where to offload parameters?", deepspeed_devices, lambda x: deepspeed_devices[int(x)]
+ )
+ if deepspeed_config["offload_param_device"] == "nvme":
+ deepspeed_config["offload_param_nvme_path"] = _ask_field(
+ "Nvme Path to offload parameters?",
+ str,
+ default="/nvme",
+ )
+ if deepspeed_config["offload_optimizer_device"] == "nvme":
+ deepspeed_config["offload_optimizer_nvme_path"] = _ask_field(
+ "Nvme Path to offload optimizer states?",
+ str,
+ default="/nvme",
+ )
+ deepspeed_config["gradient_accumulation_steps"] = _ask_field(
+ "How many gradient accumulation steps you're passing in your script? [1]: ",
+ int,
+ default=1,
+ )
+ use_gradient_clipping = _ask_field(
+ "Do you want to use gradient clipping? [yes/NO]: ",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+ if use_gradient_clipping:
+ deepspeed_config["gradient_clipping"] = _ask_field(
+ "What is the gradient clipping value? [1.0]: ",
+ float,
+ default=1.0,
+ )
+ if deepspeed_config["zero_stage"] == 3:
+ deepspeed_config["zero3_save_16bit_model"] = _ask_field(
+ "Do you want to save 16-bit model weights when using ZeRO Stage-3? [yes/NO]: ",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+ deepspeed_config["zero3_init_flag"] = _ask_field(
+ "Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: ",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+ if deepspeed_config["zero3_init_flag"]:
+ if not is_transformers_available():
+ raise Exception(
+ "When `zero3_init_flag` is set, it requires Transformers to be installed. "
+ "Please run `pip3 install transformers`."
+ )
+
+ if num_machines > 1:
+ launcher_query = "Which Type of launcher do you want to use?"
+ deepspeed_config["deepspeed_multinode_launcher"] = _ask_options(
+ launcher_query,
+ DEEPSPEED_MULTINODE_LAUNCHERS,
+ lambda x: DEEPSPEED_MULTINODE_LAUNCHERS[int(x)],
+ )
+
+ if deepspeed_config["deepspeed_multinode_launcher"] != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
+ deepspeed_config["deepspeed_hostfile"] = _ask_field(
+ "DeepSpeed configures multi-node compute resources with hostfile. "
+ "Each row is of the format `hostname slots=[num_gpus]`, e.g., `localhost slots=2`; "
+ "for more information please refer official [documentation]"
+ "(https://www.deepspeed.ai/getting-started/#resource-configuration-multi-node). "
+ "Please specify the location of hostfile: ",
+ str,
+ )
+
+ is_exclusion_filter = _ask_field(
+ "Do you want to specify exclusion filter string? [yes/NO]: ",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+ if is_exclusion_filter:
+ deepspeed_config["deepspeed_exclusion_filter"] = _ask_field(
+ "DeepSpeed exclusion filter string: ",
+ str,
+ )
+
+ is_inclusion_filter = _ask_field(
+ "Do you want to specify inclusion filter string? [yes/NO]: ",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+ if is_inclusion_filter:
+ deepspeed_config["deepspeed_inclusion_filter"] = _ask_field(
+ "DeepSpeed inclusion filter string: ",
+ str,
+ )
+
+ fsdp_config = {}
+ if distributed_type in [
+ DistributedType.MULTI_GPU,
+ DistributedType.MULTI_NPU,
+ DistributedType.MULTI_MLU,
+ DistributedType.MULTI_XPU,
+ ]:
+ use_fsdp = _ask_field(
+ "Do you want to use FullyShardedDataParallel? [yes/NO]: ",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+ if use_fsdp:
+ distributed_type = DistributedType.FSDP
+ if distributed_type == DistributedType.FSDP:
+ sharding_strategy_query = "What should be your sharding strategy?"
+ fsdp_config["fsdp_sharding_strategy"] = _ask_options(
+ sharding_strategy_query,
+ FSDP_SHARDING_STRATEGY,
+ lambda x: FSDP_SHARDING_STRATEGY[int(x)],
+ )
+ fsdp_config["fsdp_offload_params"] = _ask_field(
+ "Do you want to offload parameters and gradients to CPU? [yes/NO]: ",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+ fsdp_wrap_query = "What should be your auto wrap policy?"
+ fsdp_config["fsdp_auto_wrap_policy"] = _ask_options(
+ fsdp_wrap_query,
+ FSDP_AUTO_WRAP_POLICY,
+ lambda x: FSDP_AUTO_WRAP_POLICY[int(x)],
+ )
+ if fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[0]:
+ use_no_split_modules = _ask_field(
+ "Do you want to use the model's `_no_split_modules` to wrap. Only applicable for 🤗 Transformers [yes/NO]: ",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+ if not use_no_split_modules:
+ fsdp_config["fsdp_transformer_layer_cls_to_wrap"] = _ask_field(
+ "Specify the comma-separated list of transformer layer class names (case-sensitive) to wrap ,e.g, :"
+ "`BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput` ...? : ",
+ str,
+ )
+ elif fsdp_config["fsdp_auto_wrap_policy"] == FSDP_AUTO_WRAP_POLICY[1]:
+ fsdp_config["fsdp_min_num_params"] = _ask_field(
+ "What should be your FSDP's minimum number of parameters for Default Auto Wrapping Policy? [1e8]: ",
+ int,
+ default=100000000,
+ )
+ fsdp_backward_prefetch_query = "What should be your FSDP's backward prefetch policy?"
+ fsdp_config["fsdp_backward_prefetch"] = _ask_options(
+ fsdp_backward_prefetch_query,
+ FSDP_BACKWARD_PREFETCH,
+ lambda x: FSDP_BACKWARD_PREFETCH[int(x)],
+ )
+ fsdp_state_dict_type_query = "What should be your FSDP's state dict type?"
+ fsdp_config["fsdp_state_dict_type"] = _ask_options(
+ fsdp_state_dict_type_query,
+ FSDP_STATE_DICT_TYPE,
+ lambda x: FSDP_STATE_DICT_TYPE[int(x)],
+ default=2,
+ )
+ fsdp_config["fsdp_forward_prefetch"] = _ask_field(
+ "Do you want to enable FSDP's forward prefetch policy? [yes/NO]: ",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+ fsdp_config["fsdp_use_orig_params"] = _ask_field(
+ "Do you want to enable FSDP's `use_orig_params` feature? [YES/no]: ",
+ _convert_yes_no_to_bool,
+ default=True,
+ error_message="Please enter yes or no.",
+ )
+ fsdp_config["fsdp_cpu_ram_efficient_loading"] = _ask_field(
+ "Do you want to enable CPU RAM efficient model loading? Only applicable for 🤗 Transformers models. [YES/no]: ",
+ _convert_yes_no_to_bool,
+ default=True,
+ error_message="Please enter yes or no.",
+ )
+ if fsdp_config["fsdp_cpu_ram_efficient_loading"]:
+ fsdp_config["fsdp_sync_module_states"] = True
+ else:
+ fsdp_config["fsdp_sync_module_states"] = _ask_field(
+ "Do you want each individually wrapped FSDP unit to broadcast module parameters from rank 0 at the start? [YES/no]: ",
+ _convert_yes_no_to_bool,
+ default=True,
+ error_message="Please enter yes or no.",
+ )
+
+ megatron_lm_config = {}
+ if distributed_type in [DistributedType.MULTI_GPU]:
+ use_megatron_lm = _ask_field(
+ "Do you want to use Megatron-LM ? [yes/NO]: ",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+ if use_megatron_lm:
+ distributed_type = DistributedType.MEGATRON_LM
+ if distributed_type == DistributedType.MEGATRON_LM:
+ prefix = "megatron_lm_"
+ megatron_lm_config[prefix + "tp_degree"] = _ask_field(
+ "What is the Tensor Parallelism degree/size? [1]:",
+ int,
+ default=1,
+ error_message="Please enter an integer.",
+ )
+ if megatron_lm_config[prefix + "tp_degree"] > 1:
+ megatron_lm_config[prefix + "sequence_parallelism"] = _ask_field(
+ "Do you want to enable Sequence Parallelism? [YES/no]: ",
+ _convert_yes_no_to_bool,
+ default=True,
+ error_message="Please enter yes or no.",
+ )
+
+ megatron_lm_config[prefix + "pp_degree"] = _ask_field(
+ "What is the Pipeline Parallelism degree/size? [1]:",
+ int,
+ default=1,
+ error_message="Please enter an integer.",
+ )
+ if megatron_lm_config[prefix + "pp_degree"] > 1:
+ megatron_lm_config[prefix + "num_micro_batches"] = _ask_field(
+ "What is the number of micro-batches? [1]:",
+ int,
+ default=1,
+ error_message="Please enter an integer.",
+ )
+
+ megatron_lm_config[prefix + "recompute_activations"] = _ask_field(
+ "Do you want to enable selective activation recomputation? [YES/no]: ",
+ _convert_yes_no_to_bool,
+ default=True,
+ error_message="Please enter yes or no.",
+ )
+
+ megatron_lm_config[prefix + "use_distributed_optimizer"] = _ask_field(
+ "Do you want to use distributed optimizer "
+ "which shards optimizer state and gradients across data parallel ranks? [YES/no]: ",
+ _convert_yes_no_to_bool,
+ default=True,
+ error_message="Please enter yes or no.",
+ )
+
+ megatron_lm_config[prefix + "gradient_clipping"] = _ask_field(
+ "What is the gradient clipping value based on global L2 Norm (0 to disable)? [1.0]: ",
+ float,
+ default=1.0,
+ )
+ # TPU specific defaults
+ tpu_commands = None
+ tpu_command_file = None
+ tpu_downcast_bf16 = "no"
+ tpu_env = []
+ tpu_name = None
+ tpu_vm = None
+ tpu_zone = None
+ tpu_use_sudo = False
+ tpu_use_cluster = False
+
+ if distributed_type in [
+ DistributedType.MULTI_CPU,
+ DistributedType.MULTI_XPU,
+ DistributedType.MULTI_GPU,
+ DistributedType.MULTI_MLU,
+ DistributedType.MULTI_NPU,
+ DistributedType.XLA,
+ ]:
+ machine_type = str(distributed_type).split(".")[1].replace("MULTI_", "")
+ if machine_type == "TPU":
+ machine_type += " cores"
+ elif machine_type == "CPU":
+ machine_type = "processes"
+ else:
+ machine_type += "(s)"
+ num_processes = _ask_field(
+ f"How many {machine_type} should be used for distributed training? [1]:",
+ int,
+ default=1,
+ error_message="Please enter an integer.",
+ )
+ elif distributed_type in [DistributedType.FSDP, DistributedType.DEEPSPEED, DistributedType.MEGATRON_LM]:
+ num_processes = _ask_field(
+ "How many GPU(s) should be used for distributed training? [1]:",
+ int,
+ default=1,
+ error_message="Please enter an integer.",
+ )
+ else:
+ num_processes = 1
+
+ if (distributed_type == DistributedType.MULTI_GPU) and (num_machines == 1) and (num_processes == 1):
+ raise ValueError(
+ f"Specified distributed type {distributed_type} but only using 1 GPU on a single machine. Please select `No distributed training` for the type of machine you are using."
+ )
+
+ if (
+ distributed_type
+ in [
+ DistributedType.MULTI_GPU,
+ DistributedType.MULTI_MLU,
+ DistributedType.MULTI_NPU,
+ DistributedType.MULTI_XPU,
+ DistributedType.NO,
+ ]
+ and not use_cpu
+ and not use_mps
+ ):
+ if is_npu_available():
+ machine_type = "NPU(s)"
+ elif is_mlu_available():
+ machine_type = "MLU(s)"
+ else:
+ machine_type = "GPU(s)"
+ gpu_ids = _ask_field(
+ f"What {machine_type} (by id) should be used for training on this machine as a comma-seperated list? [all]:",
+ default="all",
+ )
+
+ # CPU affinity is only supported on NVIDIA hardware for now
+ enable_cpu_affinity = False
+ if distributed_type in (DistributedType.NO, DistributedType.MULTI_GPU) and not use_cpu and not use_mps:
+ enable_cpu_affinity = _ask_field(
+ "Would you like to enable numa efficiency? (Currently only supported on NVIDIA hardware). [yes/NO]: ",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+
+ if distributed_type == DistributedType.XLA:
+ mixed_precision = "no"
+ main_training_function = _ask_field(
+ "What is the name of the function in your script that should be launched in all parallel scripts? [main]: ",
+ default="main",
+ )
+ tpu_use_cluster = _ask_field(
+ "Are you using a TPU cluster? [yes/NO]: ",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+ if tpu_use_cluster:
+ tpu_name = _ask_field(
+ "What is the name of your TPU cluster? ",
+ default=None,
+ error_message="Please enter the name of your TPU cluster.",
+ )
+ tpu_zone = _ask_field(
+ "What is the zone of your TPU cluster? ",
+ default=None,
+ error_message="Please enter the zone of your TPU cluster.",
+ )
+ tpu_use_sudo = _ask_field(
+ "To run a python script in a TPU pod, should `sudo` be used? [yes/NO]: ",
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+ run_commands = _ask_field(
+ "Do you have code you wish to run on startup in each pod? [yes/NO]: ",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+ if run_commands:
+ use_command_file = _ask_field(
+ "Is this code located in a bash script? [yes/NO]: ",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+ if use_command_file:
+ tpu_command_file = _ask_field(
+ "What is the path to your bash script? ",
+ default=None,
+ error_message="Please enter the path to your bash script.",
+ )
+ tpu_command_file = os.path.abspath(tpu_command_file)
+ else:
+ print("Please enter each command seperately you wish to run on startup in each pod.")
+ tpu_commands = []
+ another_command = True
+ while another_command:
+ tpu_commands.append(
+ _ask_field(
+ "Please enter a single command to be ran ",
+ default=None,
+ error_message="Please enter the commands you wish to run on startup in each pod as a single string.",
+ )
+ )
+ another_command = _ask_field(
+ "Do you wish to add another command? [yes/NO]: ",
+ _convert_yes_no_to_bool,
+ default=False,
+ error_message="Please enter yes or no.",
+ )
+ tpu_vm = _ask_field(
+ "If not using an instance group, what are the names of the Compute VM instances to be used, seperated by a comma: ",
+ default="",
+ ).split(",")
+ tpu_env = _ask_field(
+ "What environment variables do you wish to set in each pod, seperated by a comma: ",
+ default="",
+ ).split(",")
+
+ else:
+ main_training_function = "main"
+ if distributed_type == DistributedType.DEEPSPEED and use_deepspeed_config:
+ mixed_precision = None
+ else:
+ mixed_precision = _ask_options(
+ "Do you wish to use FP16 or BF16 (mixed precision)?",
+ ["no", "fp16", "bf16", "fp8"],
+ _convert_mixed_precision,
+ )
+
+ if use_dynamo and mixed_precision == "no" and not use_cpu:
+ print(
+ "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
+ )
+
+ if distributed_type == DistributedType.XLA and mixed_precision == "bf16":
+ tpu_downcast_bf16 = _ask_field(
+ "Should `torch.float` be cast as `bfloat16` and `torch.double` remain `float32` on TPUs?", default="no"
+ )
+
+ return ClusterConfig(
+ compute_environment=ComputeEnvironment.LOCAL_MACHINE,
+ distributed_type=distributed_type,
+ num_processes=num_processes,
+ gpu_ids=gpu_ids,
+ mixed_precision=mixed_precision,
+ downcast_bf16=tpu_downcast_bf16,
+ machine_rank=machine_rank,
+ num_machines=num_machines,
+ main_process_ip=main_process_ip,
+ main_process_port=main_process_port,
+ main_training_function=main_training_function,
+ deepspeed_config=deepspeed_config,
+ fsdp_config=fsdp_config,
+ megatron_lm_config=megatron_lm_config,
+ ipex_config=ipex_config,
+ mpirun_config=mpirun_config,
+ use_cpu=use_cpu,
+ rdzv_backend=rdzv_backend,
+ same_network=same_network,
+ commands=tpu_commands,
+ command_file=tpu_command_file,
+ tpu_env=tpu_env,
+ tpu_name=tpu_name,
+ tpu_vm=tpu_vm,
+ tpu_zone=tpu_zone,
+ tpu_use_sudo=tpu_use_sudo,
+ tpu_use_cluster=tpu_use_cluster,
+ dynamo_config=dynamo_config,
+ debug=debug,
+ enable_cpu_affinity=enable_cpu_affinity,
+ )
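
`get_cluster_input()` is interactive, but the `ClusterConfig` it returns can also be constructed directly and serialized with `to_yaml_file`, which produces the same kind of file that `accelerate config` saves. A sketch with illustrative values (the path and field values below are examples, not defaults):

```python
from accelerate.commands.config.config_args import ClusterConfig
from accelerate.utils import ComputeEnvironment, DistributedType

config = ClusterConfig(
    compute_environment=ComputeEnvironment.LOCAL_MACHINE,
    distributed_type=DistributedType.MULTI_GPU,
    mixed_precision="bf16",
    use_cpu=False,
    debug=False,
    num_processes=2,  # e.g. two local GPUs
    num_machines=1,
    machine_rank=0,
    main_training_function="main",
)
config.to_yaml_file("./example_default_config.yaml")  # illustrative path
```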
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/config.py b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..72414f2abe62d76bd5133f4b0ed99bf34133f6f6
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/config.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import os
+
+from accelerate.utils import ComputeEnvironment
+
+from .cluster import get_cluster_input
+from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
+from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
+from .sagemaker import get_sagemaker_input
+
+
+description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
+
+
+def get_user_input():
+ compute_environment = _ask_options(
+ "In which compute environment are you running?",
+ ["This machine", "AWS (Amazon SageMaker)"],
+ _convert_compute_environment,
+ )
+ if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
+ config = get_sagemaker_input()
+ else:
+ config = get_cluster_input()
+ return config
+
+
+def config_command_parser(subparsers=None):
+ if subparsers is not None:
+ parser = subparsers.add_parser("config", description=description)
+ else:
+ parser = argparse.ArgumentParser("Accelerate config command", description=description)
+
+ parser.add_argument(
+ "--config_file",
+ default=None,
+ help=(
+ "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
+ "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
+ "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
+ "with 'huggingface'."
+ ),
+ )
+
+ if subparsers is not None:
+ parser.set_defaults(func=config_command)
+ return parser
+
+
+def config_command(args):
+ config = get_user_input()
+ if args.config_file is not None:
+ config_file = args.config_file
+ else:
+ if not os.path.isdir(cache_dir):
+ os.makedirs(cache_dir)
+ config_file = default_yaml_config_file
+
+ if config_file.endswith(".json"):
+ config.to_json_file(config_file)
+ else:
+ config.to_yaml_file(config_file)
+ print(f"accelerate configuration saved at {config_file}")
+
+
+def main():
+ parser = config_command_parser()
+ args = parser.parse_args()
+ config_command(args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/config_args.py b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/config_args.py
new file mode 100644
index 0000000000000000000000000000000000000000..c50f1c34a42d354903a80b506290958807a7b7c0
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/config_args.py
@@ -0,0 +1,243 @@
+#!/usr/bin/env python
+
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+from dataclasses import dataclass
+from enum import Enum
+from typing import List, Optional, Union
+
+import yaml
+
+from ...utils import ComputeEnvironment, DistributedType, SageMakerDistributedType
+from ...utils.constants import SAGEMAKER_PYTHON_VERSION, SAGEMAKER_PYTORCH_VERSION, SAGEMAKER_TRANSFORMERS_VERSION
+
+
+hf_cache_home = os.path.expanduser(
+ os.environ.get("HF_HOME", os.path.join(os.environ.get("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
+)
+cache_dir = os.path.join(hf_cache_home, "accelerate")
+default_json_config_file = os.path.join(cache_dir, "default_config.yaml")
+default_yaml_config_file = os.path.join(cache_dir, "default_config.yaml")
+
+# For backward compatibility: the default config is the json one if it's the only existing file.
+if os.path.isfile(default_yaml_config_file) or not os.path.isfile(default_json_config_file):
+ default_config_file = default_yaml_config_file
+else:
+ default_config_file = default_json_config_file
+
+
+def load_config_from_file(config_file):
+ if config_file is not None:
+ if not os.path.isfile(config_file):
+ raise FileNotFoundError(
+ f"The passed configuration file `{config_file}` does not exist. "
+ "Please pass an existing file to `accelerate launch`, or use the default one "
+ "created through `accelerate config` and run `accelerate launch` "
+ "without the `--config_file` argument."
+ )
+ else:
+ config_file = default_config_file
+ with open(config_file, encoding="utf-8") as f:
+ if config_file.endswith(".json"):
+ if (
+ json.load(f).get("compute_environment", ComputeEnvironment.LOCAL_MACHINE)
+ == ComputeEnvironment.LOCAL_MACHINE
+ ):
+ config_class = ClusterConfig
+ else:
+ config_class = SageMakerConfig
+ return config_class.from_json_file(json_file=config_file)
+ else:
+ if (
+ yaml.safe_load(f).get("compute_environment", ComputeEnvironment.LOCAL_MACHINE)
+ == ComputeEnvironment.LOCAL_MACHINE
+ ):
+ config_class = ClusterConfig
+ else:
+ config_class = SageMakerConfig
+ return config_class.from_yaml_file(yaml_file=config_file)
+
+
+@dataclass
+class BaseConfig:
+ compute_environment: ComputeEnvironment
+ distributed_type: Union[DistributedType, SageMakerDistributedType]
+ mixed_precision: str
+ use_cpu: bool
+ debug: bool
+
+ def to_dict(self):
+ result = self.__dict__
+ # For serialization, it's best to convert Enums to strings (or their underlying value type).
+ for key, value in result.items():
+ if isinstance(value, Enum):
+ result[key] = value.value
+ if isinstance(value, dict) and not bool(value):
+ result[key] = None
+ result = {k: v for k, v in result.items() if v is not None}
+ return result
+
+ @classmethod
+ def from_json_file(cls, json_file=None):
+ json_file = default_json_config_file if json_file is None else json_file
+ with open(json_file, encoding="utf-8") as f:
+ config_dict = json.load(f)
+ if "compute_environment" not in config_dict:
+ config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE
+ if "mixed_precision" not in config_dict:
+ config_dict["mixed_precision"] = "fp16" if ("fp16" in config_dict and config_dict["fp16"]) else None
+ if "fp16" in config_dict: # Convert the config to the new format.
+ del config_dict["fp16"]
+ if "dynamo_backend" in config_dict: # Convert the config to the new format.
+ dynamo_backend = config_dict.pop("dynamo_backend")
+ config_dict["dynamo_config"] = {} if dynamo_backend == "NO" else {"dynamo_backend": dynamo_backend}
+ if "use_cpu" not in config_dict:
+ config_dict["use_cpu"] = False
+ if "debug" not in config_dict:
+ config_dict["debug"] = False
+ if "enable_cpu_affinity" not in config_dict:
+ config_dict["enable_cpu_affinity"] = False
+ extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
+ if len(extra_keys) > 0:
+ raise ValueError(
+ f"The config file at {json_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate`"
+ " version or fix (and potentially remove) these keys from your config file."
+ )
+
+ return cls(**config_dict)
+
+ def to_json_file(self, json_file):
+ with open(json_file, "w", encoding="utf-8") as f:
+ content = json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
+ f.write(content)
+
+ @classmethod
+ def from_yaml_file(cls, yaml_file=None):
+ yaml_file = default_yaml_config_file if yaml_file is None else yaml_file
+ with open(yaml_file, encoding="utf-8") as f:
+ config_dict = yaml.safe_load(f)
+ if "compute_environment" not in config_dict:
+ config_dict["compute_environment"] = ComputeEnvironment.LOCAL_MACHINE
+ if "mixed_precision" not in config_dict:
+ config_dict["mixed_precision"] = "fp16" if ("fp16" in config_dict and config_dict["fp16"]) else None
+ if isinstance(config_dict["mixed_precision"], bool) and not config_dict["mixed_precision"]:
+ config_dict["mixed_precision"] = "no"
+ if "fp16" in config_dict: # Convert the config to the new format.
+ del config_dict["fp16"]
+ if "dynamo_backend" in config_dict: # Convert the config to the new format.
+ dynamo_backend = config_dict.pop("dynamo_backend")
+ config_dict["dynamo_config"] = {} if dynamo_backend == "NO" else {"dynamo_backend": dynamo_backend}
+ if "use_cpu" not in config_dict:
+ config_dict["use_cpu"] = False
+ if "debug" not in config_dict:
+ config_dict["debug"] = False
+ if "enable_cpu_affinity" not in config_dict:
+ config_dict["enable_cpu_affinity"] = False
+ extra_keys = sorted(set(config_dict.keys()) - set(cls.__dataclass_fields__.keys()))
+ if len(extra_keys) > 0:
+ raise ValueError(
+ f"The config file at {yaml_file} had unknown keys ({extra_keys}), please try upgrading your `accelerate`"
+ " version or fix (and potentially remove) these keys from your config file."
+ )
+ return cls(**config_dict)
+
+ def to_yaml_file(self, yaml_file):
+ with open(yaml_file, "w", encoding="utf-8") as f:
+ yaml.safe_dump(self.to_dict(), f)
+
+ def __post_init__(self):
+ if isinstance(self.compute_environment, str):
+ self.compute_environment = ComputeEnvironment(self.compute_environment)
+ if isinstance(self.distributed_type, str):
+ if self.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
+ self.distributed_type = SageMakerDistributedType(self.distributed_type)
+ else:
+ self.distributed_type = DistributedType(self.distributed_type)
+ if getattr(self, "dynamo_config", None) is None:
+ self.dynamo_config = {}
+
+
+@dataclass
+class ClusterConfig(BaseConfig):
+ num_processes: int
+ machine_rank: int = 0
+ num_machines: int = 1
+ gpu_ids: Optional[str] = None
+ main_process_ip: Optional[str] = None
+ main_process_port: Optional[int] = None
+ rdzv_backend: Optional[str] = "static"
+ same_network: Optional[bool] = False
+ main_training_function: str = "main"
+ enable_cpu_affinity: bool = False
+
+ # args for deepspeed_plugin
+ deepspeed_config: dict = None
+ # args for fsdp
+ fsdp_config: dict = None
+ # args for megatron_lm
+ megatron_lm_config: dict = None
+ # args for ipex
+ ipex_config: dict = None
+ # args for mpirun
+ mpirun_config: dict = None
+ # args for TPU
+ downcast_bf16: bool = False
+
+ # args for TPU pods
+ tpu_name: str = None
+ tpu_zone: str = None
+ tpu_use_cluster: bool = False
+ tpu_use_sudo: bool = False
+ command_file: str = None
+ commands: List[str] = None
+ tpu_vm: List[str] = None
+ tpu_env: List[str] = None
+
+ # args for dynamo
+ dynamo_config: dict = None
+
+ def __post_init__(self):
+ if self.deepspeed_config is None:
+ self.deepspeed_config = {}
+ if self.fsdp_config is None:
+ self.fsdp_config = {}
+ if self.megatron_lm_config is None:
+ self.megatron_lm_config = {}
+ if self.ipex_config is None:
+ self.ipex_config = {}
+ if self.mpirun_config is None:
+ self.mpirun_config = {}
+ return super().__post_init__()
+
+
+@dataclass
+class SageMakerConfig(BaseConfig):
+ ec2_instance_type: str
+ iam_role_name: str
+ image_uri: Optional[str] = None
+ profile: Optional[str] = None
+ region: str = "us-east-1"
+ num_machines: int = 1
+ gpu_ids: str = "all"
+ base_job_name: str = f"accelerate-sagemaker-{num_machines}"
+ pytorch_version: str = SAGEMAKER_PYTORCH_VERSION
+ transformers_version: str = SAGEMAKER_TRANSFORMERS_VERSION
+ py_version: str = SAGEMAKER_PYTHON_VERSION
+ sagemaker_inputs_file: str = None
+ sagemaker_metrics_file: str = None
+ additional_args: dict = None
+ dynamo_config: dict = None
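
`load_config_from_file` is the read-side counterpart of the `to_yaml_file`/`to_json_file` writers above: it selects `ClusterConfig` or `SageMakerConfig` based on the stored `compute_environment`. A small round-trip sketch, assuming `accelerate config` has already been run so the default file exists (the backup path is illustrative):

```python
from accelerate.commands.config.config_args import load_config_from_file

config = load_config_from_file(None)  # None falls back to the default config file
print(type(config).__name__)          # ClusterConfig or SageMakerConfig
print(config.to_dict())
config.to_yaml_file("./backup_config.yaml")
```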
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/default.py b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/default.py
new file mode 100644
index 0000000000000000000000000000000000000000..e33331b98e6c8eacbaf8e9710b40e2ca6fc88b3d
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/config/default.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python
+
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from pathlib import Path
+
+import torch
+
+from ...utils import is_mlu_available, is_npu_available, is_xpu_available
+from .config_args import ClusterConfig, default_json_config_file
+from .config_utils import SubcommandHelpFormatter
+
+
+description = "Create a default config file for Accelerate with only a few flags set."
+
+
+def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
+ """
+ Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs. Will also
+ set CPU if it is a CPU-only machine.
+
+ Args:
+ mixed_precision (`str`, *optional*, defaults to "no"):
+ Mixed Precision to use. Should be one of "no", "fp16", or "bf16"
+ save_location (`str`, *optional*, defaults to `default_json_config_file`):
+ Optional custom save location. Should be passed to `--config_file` when using `accelerate launch`. Default
+ location is inside the huggingface cache folder (`~/.cache/huggingface`) but can be overridden by setting
+ the `HF_HOME` environmental variable, followed by `accelerate/default_config.yaml`.
+ use_xpu (`bool`, *optional*, defaults to `False`):
+ Whether to use XPU if available.
+ """
+ path = Path(save_location)
+ path.parent.mkdir(parents=True, exist_ok=True)
+ if path.exists():
+ print(
+ f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
+ )
+ return False
+ mixed_precision = mixed_precision.lower()
+ if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
+ raise ValueError(
+ f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
+ )
+ config = {
+ "compute_environment": "LOCAL_MACHINE",
+ "mixed_precision": mixed_precision,
+ }
+ if is_mlu_available():
+ num_mlus = torch.mlu.device_count()
+ config["num_processes"] = num_mlus
+ config["use_cpu"] = False
+ if num_mlus > 1:
+ config["distributed_type"] = "MULTI_MLU"
+ else:
+ config["distributed_type"] = "NO"
+ elif torch.cuda.is_available():
+ num_gpus = torch.cuda.device_count()
+ config["num_processes"] = num_gpus
+ config["use_cpu"] = False
+ if num_gpus > 1:
+ config["distributed_type"] = "MULTI_GPU"
+ else:
+ config["distributed_type"] = "NO"
+ elif is_xpu_available() and use_xpu:
+ num_xpus = torch.xpu.device_count()
+ config["num_processes"] = num_xpus
+ config["use_cpu"] = False
+ if num_xpus > 1:
+ config["distributed_type"] = "MULTI_XPU"
+ else:
+ config["distributed_type"] = "NO"
+ elif is_npu_available():
+ num_npus = torch.npu.device_count()
+ config["num_processes"] = num_npus
+ config["use_cpu"] = False
+ if num_npus > 1:
+ config["distributed_type"] = "MULTI_NPU"
+ else:
+ config["distributed_type"] = "NO"
+ else:
+ num_xpus = 0
+ config["use_cpu"] = True
+ config["num_processes"] = 1
+ config["distributed_type"] = "NO"
+ config["debug"] = False
+ config = ClusterConfig(**config)
+ config.to_json_file(path)
+ return path
+
+
+def default_command_parser(parser, parents):
+ parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
+ parser.add_argument(
+ "--config_file",
+ default=default_json_config_file,
+ help=(
+ "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
+ "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
+ "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
+ "with 'huggingface'."
+ ),
+ dest="save_location",
+ )
+
+ parser.add_argument(
+ "--mixed_precision",
+ choices=["no", "fp16", "bf16"],
+ type=str,
+ help="Whether or not to use mixed precision training. "
+ "Choose between FP16 and BF16 (bfloat16) training. "
+ "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
+ default="no",
+ )
+ parser.set_defaults(func=default_config_command)
+ return parser
+
+
+def default_config_command(args):
+ config_file = write_basic_config(args.mixed_precision, args.save_location)
+ if config_file:
+ print(f"accelerate configuration saved at {config_file}")
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/commands/env.py b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/env.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb2f60f787a9eba3f75b6ac9171aefd0ffc61647
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/env.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python
+
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import os
+import platform
+import subprocess
+
+import numpy as np
+import psutil
+import torch
+
+from accelerate import __version__ as version
+from accelerate.commands.config import default_config_file, load_config_from_file
+
+from ..utils import is_mlu_available, is_npu_available, is_xpu_available
+
+
+def env_command_parser(subparsers=None):
+ if subparsers is not None:
+ parser = subparsers.add_parser("env")
+ else:
+ parser = argparse.ArgumentParser("Accelerate env command")
+
+ parser.add_argument(
+ "--config_file", default=None, help="The config file to use for the default values in the launching script."
+ )
+
+ if subparsers is not None:
+ parser.set_defaults(func=env_command)
+ return parser
+
+
+def env_command(args):
+ pt_version = torch.__version__
+ pt_cuda_available = torch.cuda.is_available()
+ pt_xpu_available = is_xpu_available()
+ pt_mlu_available = is_mlu_available()
+ pt_npu_available = is_npu_available()
+
+ accelerate_config = "Not found"
+ # Get the default from the config file.
+ if args.config_file is not None or os.path.isfile(default_config_file):
+ accelerate_config = load_config_from_file(args.config_file).to_dict()
+
+ # if we can run which, get it
+ command = None
+ bash_location = "Not found"
+ if os.name == "nt":
+ command = ["where", "accelerate"]
+ elif os.name == "posix":
+ command = ["which", "accelerate"]
+ if command is not None:
+ bash_location = subprocess.check_output(command, text=True, stderr=subprocess.STDOUT).strip()
+ info = {
+ "`Accelerate` version": version,
+ "Platform": platform.platform(),
+ "`accelerate` bash location": bash_location,
+ "Python version": platform.python_version(),
+ "Numpy version": np.__version__,
+ "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
+ "PyTorch XPU available": str(pt_xpu_available),
+ "PyTorch NPU available": str(pt_npu_available),
+ "PyTorch MLU available": str(pt_mlu_available),
+ "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
+ }
+ if pt_cuda_available:
+ info["GPU type"] = torch.cuda.get_device_name()
+
+ print("\nCopy-and-paste the text below in your GitHub issue\n")
+ print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))
+
+ print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
+ accelerate_config_str = (
+ "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
+ if isinstance(accelerate_config, dict)
+ else f"\t{accelerate_config}"
+ )
+ print(accelerate_config_str)
+
+ info["`Accelerate` configs"] = accelerate_config
+
+ return info
+
+
+def main() -> int:
+ parser = env_command_parser()
+ args = parser.parse_args()
+ env_command(args)
+ return 0
+
+
+if __name__ == "__main__":
+ raise SystemExit(main())
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/commands/estimate.py b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/estimate.py
new file mode 100644
index 0000000000000000000000000000000000000000..56da3c5ad9e953687fab71dfc1fb0a878309d1d6
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/estimate.py
@@ -0,0 +1,309 @@
+#!/usr/bin/env python
+
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from huggingface_hub import model_info
+from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError
+
+from accelerate import init_empty_weights
+from accelerate.commands.utils import CustomArgumentParser
+from accelerate.utils import (
+ calculate_maximum_sizes,
+ convert_bytes,
+ is_timm_available,
+ is_transformers_available,
+)
+
+
+if is_transformers_available():
+ import transformers
+ from transformers import AutoConfig, AutoModel
+
+if is_timm_available():
+ import timm
+
+
+def verify_on_hub(repo: str, token: str = None):
+ "Verifies that the model is on the hub and returns the model info."
+ try:
+ return model_info(repo, token=token)
+ except GatedRepoError:
+ return "gated"
+ except RepositoryNotFoundError:
+ return "repo"
+
+
+def check_has_model(error):
+ """
+ Checks what library spawned `error` when a model is not found
+ """
+ if is_timm_available() and isinstance(error, RuntimeError) and "Unknown model" in error.args[0]:
+ return "timm"
+ elif (
+ is_transformers_available()
+ and isinstance(error, OSError)
+ and "does not appear to have a file named" in error.args[0]
+ ):
+ return "transformers"
+ else:
+ return "unknown"
+
+
+def create_empty_model(model_name: str, library_name: str, trust_remote_code: bool = False, access_token: str = None):
+ """
+ Creates an empty model from its parent library on the `Hub` to calculate the overall memory consumption.
+
+ Args:
+ model_name (`str`):
+ The model name on the Hub
+ library_name (`str`):
+ The library the model has an integration with, such as `transformers`. Will be used if `model_name` has no
+ metadata on the Hub to determine the library.
+ trust_remote_code (`bool`, `optional`, defaults to `False`):
+ Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
+ execute code present on the Hub on your local machine.
+ access_token (`str`, `optional`, defaults to `None`):
+ The access token to use to access private or gated models on the Hub. (for use on the Gradio app)
+
+ Returns:
+ `torch.nn.Module`: The torch model that has been initialized on the `meta` device.
+
+ """
+ model_info = verify_on_hub(model_name, access_token)
+ # Simplified errors
+ if model_info == "gated":
+ raise GatedRepoError(
+ f"Repo for model `{model_name}` is gated. You must be authenticated to access it. Please run `huggingface-cli login`."
+ )
+ elif model_info == "repo":
+ raise RepositoryNotFoundError(
+ f"Repo for model `{model_name}` does not exist on the Hub. If you are trying to access a private repo,"
+ " make sure you are authenticated via `huggingface-cli login` and have access."
+ )
+ if library_name is None:
+ library_name = getattr(model_info, "library_name", False)
+ if not library_name:
+ raise ValueError(
+ f"Model `{model_name}` does not have any library metadata on the Hub, please manually pass in a `--library_name` to use (such as `transformers`)"
+ )
+ if library_name == "transformers":
+ if not is_transformers_available():
+ raise ImportError(
+ f"To check `{model_name}`, `transformers` must be installed. Please install it via `pip install transformers`"
+ )
+ print(f"Loading pretrained config for `{model_name}` from `transformers`...")
+ if model_info.config is None:
+ raise RuntimeError(f"Tried to load `{model_name}` with `transformers` but it does not have any metadata.")
+
+ auto_map = model_info.config.get("auto_map", False)
+ config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code, token=access_token)
+ with init_empty_weights():
+ # remote code could specify a specific `AutoModel` class in the `auto_map`
+ constructor = AutoModel
+ if isinstance(auto_map, dict):
+ value = None
+ for key in auto_map.keys():
+ if key.startswith("AutoModelFor"):
+ value = key
+ break
+ if value is not None:
+ constructor = getattr(transformers, value)
+ model = constructor.from_config(config, trust_remote_code=trust_remote_code)
+ elif library_name == "timm":
+ if not is_timm_available():
+ raise ImportError(
+ f"To check `{model_name}`, `timm` must be installed. Please install it via `pip install timm`"
+ )
+ print(f"Loading pretrained config for `{model_name}` from `timm`...")
+ with init_empty_weights():
+ model = timm.create_model(model_name, pretrained=False)
+ else:
+ raise ValueError(
+ f"Library `{library_name}` is not supported yet, please open an issue on GitHub for us to add support."
+ )
+ return model
+
+
+def create_ascii_table(headers: list, rows: list, title: str):
+ "Creates a pretty table from a list of rows, minimal version of `tabulate`."
+ sep_char, in_between = "│", "─"
+ column_widths = []
+ for i in range(len(headers)):
+ column_values = [row[i] for row in rows] + [headers[i]]
+ max_column_width = max(len(value) for value in column_values)
+ column_widths.append(max_column_width)
+
+ formats = [f"%{column_widths[i]}s" for i in range(len(rows[0]))]
+
+ pattern = f"{sep_char}{sep_char.join(formats)}{sep_char}"
+ diff = 0
+
+ def make_row(left_char, middle_char, right_char):
+ return f"{left_char}{middle_char.join([in_between * n for n in column_widths])}{in_between * diff}{right_char}"
+
+ separator = make_row("├", "┼", "┤")
+ if len(title) > sum(column_widths):
+ diff = abs(len(title) - len(separator))
+ column_widths[-1] += diff
+
+ # Update with diff
+ separator = make_row("├", "┼", "┤")
+ initial_rows = [
+ make_row("┌", in_between, "┐"),
+ f"{sep_char}{title.center(len(separator) - 2)}{sep_char}",
+ make_row("├", "┬", "┤"),
+ ]
+ table = "\n".join(initial_rows) + "\n"
+ column_widths[-1] += diff
+ centered_line = [text.center(column_widths[i]) for i, text in enumerate(headers)]
+ table += f"{pattern % tuple(centered_line)}\n{separator}\n"
+ for i, line in enumerate(rows):
+ centered_line = [t.center(column_widths[i]) for i, t in enumerate(line)]
+ table += f"{pattern % tuple(centered_line)}\n"
+ table += f'└{"┴".join([in_between * n for n in column_widths])}┘'
+
+ return table
+
+
+def estimate_command_parser(subparsers=None):
+ if subparsers is not None:
+ parser = subparsers.add_parser("estimate-memory")
+ else:
+ parser = CustomArgumentParser(description="Model size estimator for fitting a model onto CUDA memory.")
+
+ parser.add_argument("model_name", type=str, help="The model name on the Hugging Face Hub.")
+ parser.add_argument(
+ "--library_name",
+ type=str,
+ help="The library the model has an integration with, such as `transformers`, needed only if this information is not stored on the Hub.",
+ choices=["timm", "transformers"],
+ )
+ parser.add_argument(
+ "--dtypes",
+ type=str,
+ nargs="+",
+ default=["float32", "float16", "int8", "int4"],
+ help="The dtypes to use for the model, must be one (or many) of `float32`, `float16`, `int8`, and `int4`",
+ choices=["float32", "float16", "int8", "int4"],
+ )
+ parser.add_argument(
+ "--trust_remote_code",
+ action="store_true",
+ help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. This flag
+ should only be used for repositories you trust and in which you have read the code, as it will execute
+ code present on the Hub on your local machine.""",
+ default=False,
+ )
+
+ if subparsers is not None:
+ parser.set_defaults(func=estimate_command)
+ return parser
+
+
+def estimate_training_usage(bytes: int, mixed_precision: str, msamp_config: str = None) -> dict:
+ """
+ Given an amount of `bytes` and `mixed_precision`, calculates how much training memory is needed for a batch size of
+ 1.
+
+ Args:
+ bytes (`int`):
+ The size of the model being trained.
+ mixed_precision (`str`):
+ The mixed precision that would be run.
+ msamp_config (`str`):
+ The msamp config to estimate the training memory for if `mixed_precision` is set to `"fp8"`.
+ """
+ memory_sizes = {"model": -1, "optimizer": -1, "gradients": -1, "step": -1}
+ fp32_size = bytes
+ fp16_size = bytes // 2
+
+ if mixed_precision == "float32":
+ memory_sizes["model"] = fp32_size
+ memory_sizes["gradients"] = fp32_size
+ memory_sizes["optimizer"] = fp32_size * 2
+ memory_sizes["step"] = fp32_size * 4
+ elif mixed_precision in ("float16", "bfloat16") or (mixed_precision == "fp8" and msamp_config is None):
+ # With native `TransformersEngine`, there is no memory savings with FP8
+ # With mixed precision training, the model has weights stored
+ # in FP16 and FP32
+ memory_sizes["model"] = fp32_size
+ # 1.5 from weight gradient + computation (GEMM)
+ memory_sizes["gradients"] = fp32_size + fp16_size
+ # 2x from optimizer states
+ memory_sizes["optimizer"] = fp32_size * 2 # Optimizer states
+ memory_sizes["step"] = memory_sizes["optimizer"]
+ return memory_sizes
+
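+# Rough worked example (illustrative sketch): for a 1 GB fp32 model trained with
+# `mixed_precision="float16"`, this returns model=1 GB, gradients=1.5 GB (fp32 gradients
+# plus fp16 compute copies), optimizer=2 GB (optimizer states kept in fp32) and
+# step=2 GB, i.e. roughly 6.5 GB before activations and batch data.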
+
+def gather_data(args):
+ "Creates an empty model and gathers the data for the sizes"
+ try:
+ model = create_empty_model(
+ args.model_name, library_name=args.library_name, trust_remote_code=args.trust_remote_code
+ )
+ except (RuntimeError, OSError) as e:
+ library = check_has_model(e)
+ if library != "unknown":
+ raise RuntimeError(
+ f"Tried to load `{args.model_name}` with `{library}` but a possible model to load was not found inside the repo."
+ )
+ raise e
+
+ total_size, largest_layer = calculate_maximum_sizes(model)
+
+ data = []
+
+ for dtype in args.dtypes:
+ dtype_total_size = total_size
+ dtype_largest_layer = largest_layer[0]
+ dtype_training_size = estimate_training_usage(dtype_total_size, dtype)
+ if dtype == "float16":
+ dtype_total_size /= 2
+ dtype_largest_layer /= 2
+ elif dtype == "int8":
+ dtype_total_size /= 4
+ dtype_largest_layer /= 4
+ elif dtype == "int4":
+ dtype_total_size /= 8
+ dtype_largest_layer /= 8
+ data.append([dtype, dtype_largest_layer, dtype_total_size, dtype_training_size])
+ return data
+
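+# Each returned row has the shape `[dtype, largest_layer_bytes, total_bytes, training_sizes]`,
+# where `training_sizes` is the dict produced by `estimate_training_usage` above;
+# `estimate_command` below converts the numeric entries into human-readable strings.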
+
+def estimate_command(args):
+ data = gather_data(args)
+ for row in data:
+ for i, item in enumerate(row):
+ if isinstance(item, (int, float)):
+ row[i] = convert_bytes(item)
+ elif isinstance(item, dict):
+ training_usage = max(item.values())
+ row[i] = convert_bytes(training_usage) if training_usage != -1 else "N/A"
+
+ headers = ["dtype", "Largest Layer", "Total Size", "Training using Adam"]
+
+ title = f"Memory Usage for loading `{args.model_name}`"
+ table = create_ascii_table(headers, data, title)
+ print(table)
+
+
+def main():
+ parser = estimate_command_parser()
+ args = parser.parse_args()
+ estimate_command(args)
+
+
+if __name__ == "__main__":
+ main()
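+# Example invocation (illustrative; the model id is only an example):
+#   accelerate estimate-memory bert-base-cased --dtypes float32 float16
+# prints an ASCII table with the largest layer size, total size and Adam training
+# footprint for each requested dtype.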
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/commands/launch.py b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/launch.py
new file mode 100644
index 0000000000000000000000000000000000000000..f6b91654bc63c3cd0db9cca5f72be511458a20fb
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/launch.py
@@ -0,0 +1,1085 @@
+#!/usr/bin/env python
+
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import importlib
+import logging
+import os
+import subprocess
+import sys
+from pathlib import Path
+
+import psutil
+import torch
+
+from accelerate.commands.config import default_config_file, load_config_from_file
+from accelerate.commands.config.config_args import SageMakerConfig
+from accelerate.commands.config.config_utils import DYNAMO_BACKENDS
+from accelerate.commands.utils import CustomArgumentParser
+from accelerate.state import get_int_from_env
+from accelerate.utils import (
+ ComputeEnvironment,
+ DistributedType,
+ PrepareForLaunch,
+ _filter_args,
+ check_cuda_p2p_ib_support,
+ convert_dict_to_env_variables,
+ is_bf16_available,
+ is_deepspeed_available,
+ is_mlu_available,
+ is_npu_available,
+ is_rich_available,
+ is_sagemaker_available,
+ is_torch_version,
+ is_torch_xla_available,
+ is_xpu_available,
+ patch_environment,
+ prepare_deepspeed_cmd_env,
+ prepare_multi_gpu_env,
+ prepare_sagemager_args_inputs,
+ prepare_simple_launcher_cmd_env,
+ prepare_tpu,
+)
+from accelerate.utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS, TORCH_DYNAMO_MODES
+
+
+if is_rich_available():
+ from rich import get_console
+ from rich.logging import RichHandler
+
+ FORMAT = "%(message)s"
+ logging.basicConfig(format=FORMAT, datefmt="[%X]", handlers=[RichHandler()])
+
+
+logger = logging.getLogger(__name__)
+
+
+options_to_group = {
+ "multi_gpu": "Distributed GPUs",
+ "tpu": "TPU",
+ "use_deepspeed": "DeepSpeed Arguments",
+ "use_fsdp": "FSDP Arguments",
+ "use_megatron_lm": "Megatron-LM Arguments",
+}
+
+
+def clean_option(option):
+ "Finds all cases of - after the first two characters and changes them to _"
+ if option.startswith("--"):
+ return option[2:].replace("-", "_")
+
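+# For example, `clean_option("--num-processes")` returns `"num_processes"`; tokens that do
+# not start with "--" fall through and the function implicitly returns `None`.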
+
+class CustomHelpFormatter(argparse.HelpFormatter):
+ """
+ This is a custom help formatter that will hide all arguments that are not used in the command line when the help is
+ called. This is useful for the case where the user is using a specific platform and only wants to see the arguments
+ for that platform.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.titles = [
+ "Hardware Selection Arguments",
+ "Resource Selection Arguments",
+ "Training Paradigm Arguments",
+ "positional arguments",
+ "optional arguments",
+ ]
+
+ def add_argument(self, action: argparse.Action):
+ if "accelerate" in sys.argv[0] and "launch" in sys.argv[1:]:
+ args = sys.argv[2:]
+ else:
+ args = sys.argv[1:]
+
+ if len(args) > 1:
+ args = list(map(clean_option, args))
+ used_platforms = [arg for arg in args if arg in options_to_group.keys()]
+ used_titles = [options_to_group[o] for o in used_platforms]
+ if action.container.title not in self.titles + used_titles:
+ action.help = argparse.SUPPRESS
+ elif action.container.title == "Hardware Selection Arguments":
+ if set(action.option_strings).isdisjoint(set(args)):
+ action.help = argparse.SUPPRESS
+ else:
+ action.help = action.help + " (currently selected)"
+ elif action.container.title == "Training Paradigm Arguments":
+ if set(action.option_strings).isdisjoint(set(args)):
+ action.help = argparse.SUPPRESS
+ else:
+ action.help = action.help + " (currently selected)"
+
+ action.option_strings = [s for s in action.option_strings if "-" not in s[2:]]
+ super().add_argument(action)
+
+ def end_section(self):
+ if len(self._current_section.items) < 2:
+ self._current_section.items = []
+ self._current_section.heading = ""
+ super().end_section()
+
+
+def launch_command_parser(subparsers=None):
+ description = "Launch a python script in a distributed scenario. Arguments can be passed in with either hyphens (`--num-processes=2`) or underscores (`--num_processes=2`)"
+ if subparsers is not None:
+ parser = subparsers.add_parser(
+ "launch", description=description, add_help=False, allow_abbrev=False, formatter_class=CustomHelpFormatter
+ )
+ else:
+ parser = CustomArgumentParser(
+ "Accelerate launch command",
+ description=description,
+ add_help=False,
+ allow_abbrev=False,
+ formatter_class=CustomHelpFormatter,
+ )
+
+ parser.add_argument("-h", "--help", action="help", help="Show this help message and exit.")
+
+ parser.add_argument(
+ "--config_file",
+ default=None,
+ help="The config file to use for the default values in the launching script.",
+ )
+ parser.add_argument(
+ "--quiet",
+ "-q",
+ action="store_true",
+ help="Silence subprocess errors from the launch stack trace and only show the relevant tracebacks. (Only applicable to DeepSpeed and single-process configurations)",
+ )
+ # Hardware selection arguments
+ hardware_args = parser.add_argument_group(
+ "Hardware Selection Arguments", "Arguments for selecting the hardware to be used."
+ )
+ hardware_args.add_argument(
+ "--cpu", default=False, action="store_true", help="Whether or not to force the training on the CPU."
+ )
+ hardware_args.add_argument(
+ "--multi_gpu",
+ default=False,
+ action="store_true",
+ help="Whether or not this should launch a distributed GPU training.",
+ )
+ hardware_args.add_argument(
+ "--tpu", default=False, action="store_true", help="Whether or not this should launch a TPU training."
+ )
+ hardware_args.add_argument(
+ "--ipex",
+ default=False,
+ action="store_true",
+ help="Whether or not this should launch a Intel PyTorch Extension (IPEX) training.",
+ )
+
+ # Resource selection arguments
+ resource_args = parser.add_argument_group(
+ "Resource Selection Arguments", "Arguments for fine-tuning how available hardware should be used."
+ )
+ resource_args.add_argument(
+ "--mixed_precision",
+ type=str,
+ choices=["no", "fp16", "bf16", "fp8"],
+ help="Whether or not to use mixed precision training. "
+ "Choose between FP16 and BF16 (bfloat16) training. "
+ "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
+ )
+ resource_args.add_argument(
+ "--num_processes", type=int, default=None, help="The total number of processes to be launched in parallel."
+ )
+ resource_args.add_argument(
+ "--num_machines", type=int, default=None, help="The total number of machines used in this training."
+ )
+ resource_args.add_argument(
+ "--num_cpu_threads_per_process",
+ type=int,
+ default=None,
+ help="The number of CPU threads per process. Can be tuned for optimal performance.",
+ )
+ resource_args.add_argument(
+ "--enable_cpu_affinity",
+ default=False,
+ action="store_true",
+ help="Whether or not CPU affinity and balancing should be enabled. Currently only supported on NVIDIA hardware.",
+ )
+
+ # Dynamo arguments
+ resource_args.add_argument(
+ "--dynamo_backend",
+ type=str,
+ choices=["no"] + [b.lower() for b in DYNAMO_BACKENDS],
+ help="Choose a backend to optimize your training with dynamo, see more at "
+ "https://github.com/pytorch/torchdynamo.",
+ )
+ resource_args.add_argument(
+ "--dynamo_mode",
+ type=str,
+ default="default",
+ choices=TORCH_DYNAMO_MODES,
+ help="Choose a mode to optimize your training with dynamo.",
+ )
+ resource_args.add_argument(
+ "--dynamo_use_fullgraph",
+ default=False,
+ action="store_true",
+ help="Whether to use full graph mode for dynamo or it is ok to break model into several subgraphs",
+ )
+ resource_args.add_argument(
+ "--dynamo_use_dynamic",
+ default=False,
+ action="store_true",
+ help="Whether to enable dynamic shape tracing.",
+ )
+
+ # Training Paradigm arguments
+ paradigm_args = parser.add_argument_group(
+ "Training Paradigm Arguments", "Arguments for selecting which training paradigm to be used."
+ )
+ paradigm_args.add_argument(
+ "--use_deepspeed",
+ default=False,
+ action="store_true",
+ help="Whether to use deepspeed.",
+ )
+ paradigm_args.add_argument(
+ "--use_fsdp",
+ default=False,
+ action="store_true",
+ help="Whether to use fsdp.",
+ )
+ paradigm_args.add_argument(
+ "--use_megatron_lm",
+ default=False,
+ action="store_true",
+ help="Whether to use Megatron-LM.",
+ )
+ paradigm_args.add_argument(
+ "--use_xpu",
+ default=False,
+ action="store_true",
+ help="Whether to use IPEX plugin to speed up training on XPU specifically.",
+ )
+
+ # distributed GPU training arguments
+ distributed_args = parser.add_argument_group("Distributed GPUs", "Arguments related to distributed GPU training.")
+ distributed_args.add_argument(
+ "--gpu_ids",
+ default=None,
+ help="What GPUs (by id) should be used for training on this machine as a comma-seperated list",
+ )
+ distributed_args.add_argument(
+ "--same_network",
+ default=False,
+ action="store_true",
+ help="Whether all machines used for multinode training exist on the same local network.",
+ )
+ distributed_args.add_argument(
+ "--machine_rank", type=int, default=None, help="The rank of the machine on which this script is launched."
+ )
+ distributed_args.add_argument(
+ "--main_process_ip", type=str, default=None, help="The IP address of the machine of rank 0."
+ )
+ distributed_args.add_argument(
+ "--main_process_port",
+ type=int,
+ default=None,
+ help="The port to use to communicate with the machine of rank 0.",
+ )
+ distributed_args.add_argument(
+ "-t",
+ "--tee",
+ default="0",
+ type=str,
+ help="Tee std streams into a log file and also to console.",
+ )
+ distributed_args.add_argument(
+ "--role",
+ type=str,
+ default="default",
+ help="User-defined role for the workers.",
+ )
+ # Rendezvous related arguments
+ distributed_args.add_argument(
+ "--rdzv_backend",
+ type=str,
+ default="static",
+ help="The rendezvous method to use, such as 'static' (the default) or 'c10d'",
+ )
+ distributed_args.add_argument(
+ "--rdzv_conf",
+ type=str,
+ default="",
+ help="Additional rendezvous configuration (=,=,...).",
+ )
+ distributed_args.add_argument(
+ "--max_restarts",
+ type=int,
+ default=0,
+ help="Maximum number of worker group restarts before failing.",
+ )
+ distributed_args.add_argument(
+ "--monitor_interval",
+ type=float,
+ default=5,
+ help="Interval, in seconds, to monitor the state of workers.",
+ )
+ parser.add_argument(
+ "-m",
+ "--module",
+ action="store_true",
+ help="Change each process to interpret the launch script as a Python module, executing with the same behavior as 'python -m'.",
+ )
+ parser.add_argument(
+ "--no_python",
+ action="store_true",
+ help="Skip prepending the training script with 'python' - just execute it directly. Useful when the script is not a Python script.",
+ )
+
+ # TPU arguments
+ tpu_args = parser.add_argument_group("TPU", "Arguments related to TPU.")
+ tpu_args.add_argument(
+ "--tpu_cluster",
+ action="store_true",
+ dest="tpu_use_cluster",
+ help="Whether to use a GCP TPU pod for training.",
+ )
+ tpu_args.add_argument(
+ "--no_tpu_cluster",
+ action="store_false",
+ dest="tpu_use_cluster",
+ help="Should not be passed explicitly, this is for internal use only.",
+ )
+ tpu_args.add_argument(
+ "--tpu_use_sudo",
+ action="store_true",
+ help="Whether to use `sudo` when running the TPU training script in each pod.",
+ )
+ tpu_args.add_argument(
+ "--vm",
+ type=str,
+ action="append",
+ help=(
+ "List of single Compute VM instance names. "
+ "If not provided we assume usage of instance groups. For TPU pods."
+ ),
+ )
+ tpu_args.add_argument(
+ "--env",
+ type=str,
+ action="append",
+ help="List of environment variables to set on the Compute VM instances. For TPU pods.",
+ )
+ tpu_args.add_argument(
+ "--main_training_function",
+ type=str,
+ default=None,
+ help="The name of the main function to be executed in your script (only for TPU training).",
+ )
+ tpu_args.add_argument(
+ "--downcast_bf16",
+ action="store_true",
+ help="Whether when using bf16 precision on TPUs if both float and double tensors are cast to bfloat16 or if double tensors remain as float32.",
+ )
+
+ # DeepSpeed arguments
+ deepspeed_args = parser.add_argument_group("DeepSpeed Arguments", "Arguments related to DeepSpeed.")
+ deepspeed_args.add_argument(
+ "--deepspeed_config_file",
+ default=None,
+ type=str,
+ help="DeepSpeed config file.",
+ )
+ deepspeed_args.add_argument(
+ "--zero_stage",
+ default=None,
+ type=int,
+ help="DeepSpeed's ZeRO optimization stage (useful only when `use_deepspeed` flag is passed). "
+ "If unspecified, will default to `2`.",
+ )
+ deepspeed_args.add_argument(
+ "--offload_optimizer_device",
+ default=None,
+ type=str,
+ help="Decides where (none|cpu|nvme) to offload optimizer states (useful only when `use_deepspeed` flag is passed). "
+ "If unspecified, will default to 'none'.",
+ )
+ deepspeed_args.add_argument(
+ "--offload_param_device",
+ default=None,
+ type=str,
+ help="Decides where (none|cpu|nvme) to offload parameters (useful only when `use_deepspeed` flag is passed). "
+ "If unspecified, will default to 'none'.",
+ )
+ deepspeed_args.add_argument(
+ "--offload_optimizer_nvme_path",
+ default=None,
+ type=str,
+ help="Decides Nvme Path to offload optimizer states (useful only when `use_deepspeed` flag is passed). "
+ "If unspecified, will default to 'none'.",
+ )
+ deepspeed_args.add_argument(
+ "--offload_param_nvme_path",
+ default=None,
+ type=str,
+ help="Decides Nvme Path to offload parameters (useful only when `use_deepspeed` flag is passed). "
+ "If unspecified, will default to 'none'.",
+ )
+ deepspeed_args.add_argument(
+ "--gradient_accumulation_steps",
+ default=None,
+ type=int,
+ help="No of gradient_accumulation_steps used in your training script (useful only when `use_deepspeed` flag is passed). "
+ "If unspecified, will default to `1`.",
+ )
+ deepspeed_args.add_argument(
+ "--gradient_clipping",
+ default=None,
+ type=float,
+ help="gradient clipping value used in your training script (useful only when `use_deepspeed` flag is passed). "
+ "If unspecified, will default to `1.0`.",
+ )
+ deepspeed_args.add_argument(
+ "--zero3_init_flag",
+ default=None,
+ type=str,
+ help="Decides Whether (true|false) to enable `deepspeed.zero.Init` for constructing massive models. "
+ "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `true`.",
+ )
+ deepspeed_args.add_argument(
+ "--zero3_save_16bit_model",
+ default=None,
+ type=str,
+ help="Decides Whether (true|false) to save 16-bit model weights when using ZeRO Stage-3. "
+ "Only applicable with DeepSpeed ZeRO Stage-3. If unspecified, will default to `false`.",
+ )
+ deepspeed_args.add_argument(
+ "--deepspeed_hostfile",
+ default=None,
+ type=str,
+ help="DeepSpeed hostfile for configuring multi-node compute resources.",
+ )
+ deepspeed_args.add_argument(
+ "--deepspeed_exclusion_filter",
+ default=None,
+ type=str,
+ help="DeepSpeed exclusion filter string when using mutli-node setup.",
+ )
+ deepspeed_args.add_argument(
+ "--deepspeed_inclusion_filter",
+ default=None,
+ type=str,
+ help="DeepSpeed inclusion filter string when using mutli-node setup.",
+ )
+ deepspeed_args.add_argument(
+ "--deepspeed_multinode_launcher",
+ default=None,
+ type=str,
+ help="DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.",
+ )
+
+ # fsdp arguments
+ fsdp_args = parser.add_argument_group("FSDP Arguments", "Arguments related to Fully Sharded Data Parallelism.")
+ fsdp_args.add_argument(
+ "--fsdp_offload_params",
+ default="false",
+ type=str,
+ help="Decides Whether (true|false) to offload parameters and gradients to CPU. (useful only when `use_fsdp` flag is passed).",
+ )
+ fsdp_args.add_argument(
+ "--fsdp_min_num_params",
+ type=int,
+ default=1e8,
+ help="FSDP's minimum number of parameters for Default Auto Wrapping. (useful only when `use_fsdp` flag is passed).",
+ )
+ fsdp_args.add_argument(
+ "--fsdp_sharding_strategy",
+ type=str,
+ default="FULL_SHARD",
+ help="FSDP's Sharding Strategy. (useful only when `use_fsdp` flag is passed).",
+ )
+ fsdp_args.add_argument(
+ "--fsdp_auto_wrap_policy",
+ type=str,
+ default=None,
+ help="FSDP's auto wrap policy. (useful only when `use_fsdp` flag is passed).",
+ )
+ fsdp_args.add_argument(
+ "--fsdp_transformer_layer_cls_to_wrap",
+ default=None,
+ type=str,
+ help="Transformer layer class name (case-sensitive) to wrap ,e.g, `BertLayer`, `GPTJBlock`, `T5Block` .... "
+ "(useful only when `use_fsdp` flag is passed).",
+ )
+ fsdp_args.add_argument(
+ "--fsdp_backward_prefetch_policy",
+ default=None,
+ type=str,
+ help="This argument is deprecated and will be removed in version 0.27.0 of 🤗 Accelerate. Use `fsdp_backward_prefetch` instead.",
+ )
+ fsdp_args.add_argument(
+ "--fsdp_backward_prefetch",
+ default=None,
+ type=str,
+ help="FSDP's backward prefetch policy. (useful only when `use_fsdp` flag is passed).",
+ )
+ fsdp_args.add_argument(
+ "--fsdp_state_dict_type",
+ default=None,
+ type=str,
+ help="FSDP's state dict type. (useful only when `use_fsdp` flag is passed).",
+ )
+ fsdp_args.add_argument(
+ "--fsdp_forward_prefetch",
+ default="false",
+ type=str,
+ help="If True, then FSDP explicitly prefetches the next upcoming "
+ "all-gather while executing in the forward pass (useful only when `use_fsdp` flag is passed).",
+ )
+ fsdp_args.add_argument(
+ "--fsdp_use_orig_params",
+ default="true",
+ type=str,
+ help="If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable paramteres."
+ " (useful only when `use_fsdp` flag is passed).",
+ )
+ fsdp_args.add_argument(
+ "--fsdp_cpu_ram_efficient_loading",
+ default="true",
+ type=str,
+ help="If True, only the first process loads the pretrained model checkoint while all other processes have empty weights. "
+ "Only applicable for 🤗 Transformers. When using this, `--fsdp_sync_module_states` needs to True. "
+ "(useful only when `use_fsdp` flag is passed).",
+ )
+ fsdp_args.add_argument(
+ "--fsdp_sync_module_states",
+ default="true",
+ type=str,
+ help="If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0."
+ " (useful only when `use_fsdp` flag is passed).",
+ )
+
+ # megatron_lm args
+ megatron_lm_args = parser.add_argument_group("Megatron-LM Arguments", "Arguments related to Megatron-LM.")
+ megatron_lm_args.add_argument(
+ "--megatron_lm_tp_degree",
+ type=int,
+ default=1,
+ help="Megatron-LM's Tensor Parallelism (TP) degree. (useful only when `use_megatron_lm` flag is passed).",
+ )
+ megatron_lm_args.add_argument(
+ "--megatron_lm_pp_degree",
+ type=int,
+ default=1,
+ help="Megatron-LM's Pipeline Parallelism (PP) degree. (useful only when `use_megatron_lm` flag is passed).",
+ )
+ megatron_lm_args.add_argument(
+ "--megatron_lm_num_micro_batches",
+ type=int,
+ default=None,
+ help="Megatron-LM's number of micro batches when PP degree > 1. (useful only when `use_megatron_lm` flag is passed).",
+ )
+ megatron_lm_args.add_argument(
+ "--megatron_lm_sequence_parallelism",
+ default=None,
+ type=str,
+ help="Decides Whether (true|false) to enable Sequence Parallelism when TP degree > 1. "
+ "(useful only when `use_megatron_lm` flag is passed).",
+ )
+ megatron_lm_args.add_argument(
+ "--megatron_lm_recompute_activations",
+ default=None,
+ type=str,
+ help="Decides Whether (true|false) to enable Selective Activation Recomputation. "
+ "(useful only when `use_megatron_lm` flag is passed).",
+ )
+ megatron_lm_args.add_argument(
+ "--megatron_lm_use_distributed_optimizer",
+ default=None,
+ type=str,
+ help="Decides Whether (true|false) to use distributed optimizer "
+ "which shards optimizer state and gradients across Data Pralellel (DP) ranks. "
+ "(useful only when `use_megatron_lm` flag is passed).",
+ )
+ megatron_lm_args.add_argument(
+ "--megatron_lm_gradient_clipping",
+ default=1.0,
+ type=float,
+ help="Megatron-LM's gradient clipping value based on global L2 Norm (0 to disable). "
+ "(useful only when `use_megatron_lm` flag is passed).",
+ )
+
+ # AWS arguments
+ aws_args = parser.add_argument_group("AWS Arguments", "Arguments related to AWS.")
+ aws_args.add_argument(
+ "--aws_access_key_id",
+ type=str,
+ default=None,
+ help="The AWS_ACCESS_KEY_ID used to launch the Amazon SageMaker training job",
+ )
+ aws_args.add_argument(
+ "--aws_secret_access_key",
+ type=str,
+ default=None,
+ help="The AWS_SECRET_ACCESS_KEY used to launch the Amazon SageMaker training job.",
+ )
+ parser.add_argument(
+ "--debug",
+ action="store_true",
+ help="Whether to print out the torch.distributed stack trace when something fails.",
+ )
+ parser.add_argument(
+ "training_script",
+ type=str,
+ help=(
+ "The full path to the script to be launched in parallel, followed by all the arguments for the training "
+ "script."
+ ),
+ )
+
+ # MPI arguments
+ mpirun_args = parser.add_argument_group("MPI Arguments", "Arguments related to mpirun for Multi-CPU")
+ mpirun_args.add_argument(
+ "--mpirun_hostfile",
+ type=str,
+ default=None,
+ help="Location for a hostfile for using Accelerate to launch a multi-CPU training job with mpirun. This will "
+ "get passed to the MPI --hostfile or -f parameter, depending on which MPI program is installed.",
+ )
+ mpirun_args.add_argument(
+ "--mpirun_ccl",
+ type=int,
+ default=1,
+ help="The number of oneCCL worker threads when using Accelerate to launch multi-CPU training with mpirun.",
+ )
+
+ # Other arguments of the training scripts
+ parser.add_argument("training_script_args", nargs=argparse.REMAINDER, help="Arguments of the training script.")
+
+ if subparsers is not None:
+ parser.set_defaults(func=launch_command)
+ return parser
+
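+# Example invocation (illustrative sketch; `train.py` and its arguments are placeholders):
+#   accelerate launch --multi_gpu --num_processes 2 train.py --batch_size 16
+# Everything after the training script is captured by `training_script_args` and forwarded
+# to the selected launcher.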
+
+def simple_launcher(args):
+ cmd, current_env = prepare_simple_launcher_cmd_env(args)
+
+ process = subprocess.Popen(cmd, env=current_env)
+ process.wait()
+ if process.returncode != 0:
+ if not args.quiet:
+ raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
+ else:
+ sys.exit(1)
+
+
+def multi_gpu_launcher(args):
+ import torch.distributed.run as distrib_run
+
+ current_env = prepare_multi_gpu_env(args)
+ if not check_cuda_p2p_ib_support():
+ message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled."
+ warn = False
+ if "NCCL_P2P_DISABLE" not in current_env:
+ current_env["NCCL_P2P_DISABLE"] = "1"
+ warn = True
+ if "NCCL_IB_DISABLE" not in current_env:
+ current_env["NCCL_IB_DISABLE"] = "1"
+ warn = True
+ if warn:
+ logger.warning(message)
+
+ debug = getattr(args, "debug", False)
+ args = _filter_args(
+ args,
+ distrib_run.get_args_parser(),
+ ["--training_script", args.training_script, "--training_script_args", args.training_script_args],
+ )
+
+ with patch_environment(**current_env):
+ try:
+ distrib_run.run(args)
+ except Exception:
+ if is_rich_available() and debug:
+ console = get_console()
+ console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]")
+ console.print_exception(suppress=[__file__], show_locals=False)
+ else:
+ raise
+
+
+def deepspeed_launcher(args):
+ import torch.distributed.run as distrib_run
+
+ if not is_deepspeed_available():
+ raise ImportError("DeepSpeed is not installed => run `pip3 install deepspeed` or build it from source.")
+ else:
+ from deepspeed.launcher.runner import DEEPSPEED_ENVIRONMENT_NAME
+
+ cmd, current_env = prepare_deepspeed_cmd_env(args)
+ if not check_cuda_p2p_ib_support():
+ message = "Using RTX 4000 series which doesn't support faster communication speedups. Ensuring P2P and IB communications are disabled."
+ warn = False
+ if "NCCL_P2P_DISABLE" not in current_env:
+ current_env["NCCL_P2P_DISABLE"] = "1"
+ warn = True
+ if "NCCL_IB_DISABLE" not in current_env:
+ current_env["NCCL_IB_DISABLE"] = "1"
+ warn = True
+ if warn:
+ logger.warning(message)
+
+ if args.num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]:
+ with open(DEEPSPEED_ENVIRONMENT_NAME, "a") as f:
+ valid_env_items = convert_dict_to_env_variables(current_env)
+ if len(valid_env_items) > 1:
+ f.writelines(valid_env_items)
+
+ process = subprocess.Popen(cmd, env=current_env)
+ process.wait()
+ if process.returncode != 0:
+ if not args.quiet:
+ raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
+ else:
+ sys.exit(1)
+ else:
+ debug = getattr(args, "debug", False)
+ args = _filter_args(
+ args,
+ distrib_run.get_args_parser(),
+ ["--training_script", args.training_script, "--training_script_args", args.training_script_args],
+ )
+ with patch_environment(**current_env):
+ try:
+ distrib_run.run(args)
+ except Exception:
+ if is_rich_available() and debug:
+ console = get_console()
+ console.print("\n[bold red]Using --debug, `torch.distributed` Stack Trace:[/bold red]")
+ console.print_exception(suppress=[__file__], show_locals=False)
+ else:
+ raise
+
+
+def tpu_launcher(args):
+ import torch_xla.distributed.xla_multiprocessing as xmp
+
+ if args.no_python:
+ raise ValueError("--no_python cannot be used with TPU launcher")
+
+ args, current_env = prepare_tpu(args, {})
+
+ if args.module:
+ mod_name = args.training_script
+ else:
+ # Import training_script as a module
+ script_path = Path(args.training_script)
+ sys.path.append(str(script_path.parent.resolve()))
+ mod_name = script_path.stem
+
+ mod = importlib.import_module(mod_name)
+ if not hasattr(mod, args.main_training_function):
+ raise ValueError(
+ f"Your training script should have a function named {args.main_training_function}, or you should pass a "
+ "different value to `--main_training_function`."
+ )
+
+ # Patch sys.argv
+ sys.argv = [mod.__file__] + args.training_script_args
+
+ main_function = getattr(mod, args.main_training_function)
+ with patch_environment(**current_env):
+ xmp.spawn(PrepareForLaunch(main_function), args=(), nprocs=args.num_processes)
+
+
+def tpu_pod_launcher(args):
+ from torch_xla.distributed import xla_dist
+
+ current_env = {}
+ args, current_env = prepare_tpu(args, current_env, True)
+ debug = getattr(args, "debug", False)
+
+ training_script = args.training_script
+ training_script_args = args.training_script_args
+ new_args = _filter_args(
+ args, xla_dist.get_args_parser(), ["--tpu", args.tpu_name, "--positional", "", "--restart-tpuvm-pod-server"]
+ )
+
+ if args.tpu_use_sudo:
+ new_cmd = ["sudo"]
+ else:
+ new_cmd = []
+
+ new_cmd += [
+ "accelerate-launch",
+ "--tpu",
+ "--no_tpu_cluster",
+ "--num_machines",
+ "1",
+ "--mixed_precision",
+ "no",
+ "--dynamo_backend",
+ "no",
+ "--num_processes",
+ str(args.num_processes),
+ "--main_training_function",
+ str(args.main_training_function),
+ training_script,
+ ] + training_script_args
+
+ new_args.positional = new_cmd
+ bad_flags = ""
+ for arg in vars(new_args):
+ if arg.startswith("docker_"):
+ value = getattr(new_args, arg)
+ if value != "" and value is not None:
+ bad_flags += f'{arg}="{value}"\n'
+ if bad_flags != "":
+ raise ValueError(
+ f"Docker containers are not supported for TPU pod launcher currently, please remove the following flags:\n{bad_flags}"
+ )
+ new_args.env = [f"{k}={v}" for k, v in current_env.items()]
+ new_args.env.append("ACCELERATE_IN_TPU_POD=1")
+ try:
+ xla_dist.resolve_and_execute(new_args)
+ except Exception:
+ if is_rich_available() and debug:
+ console = get_console()
+ console.print("\n[bold red]Using --debug, `torch_xla.xla_dist` Stack Trace:[/bold red]")
+ console.print_exception(suppress=[__file__], show_locals=False)
+ else:
+ raise
+
+
+def sagemaker_launcher(sagemaker_config: SageMakerConfig, args):
+ if not is_sagemaker_available():
+ raise ImportError(
+ "Please install sagemaker to be able to launch training on Amazon SageMaker with `pip install accelerate[sagemaker]`"
+ )
+ if args.module or args.no_python:
+ raise ValueError(
+ "SageMaker requires a python training script file and cannot be used with --module or --no_python"
+ )
+
+ from sagemaker.huggingface import HuggingFace
+
+ args, sagemaker_inputs = prepare_sagemager_args_inputs(sagemaker_config, args)
+
+ huggingface_estimator = HuggingFace(**args)
+
+ huggingface_estimator.fit(inputs=sagemaker_inputs)
+ print(f"You can find your model data at: {huggingface_estimator.model_data}")
+
+
+def _validate_launch_command(args):
+ # Sanity checks
+ if sum([args.multi_gpu, args.cpu, args.tpu, args.use_deepspeed, args.use_fsdp]) > 1:
+ raise ValueError(
+ "You can only use one of `--cpu`, `--multi_gpu`, `--tpu`, `--use_deepspeed`, `--use_fsdp` at a time."
+ )
+ if args.multi_gpu and (args.num_processes is not None) and (args.num_processes < 2):
+ raise ValueError("You need to use at least 2 processes to use `--multi_gpu`.")
+
+ defaults = None
+ warned = []
+ mp_from_config_flag = False
+ # Get the default from the config file.
+ if args.config_file is not None or os.path.isfile(default_config_file) and not args.cpu:
+ defaults = load_config_from_file(args.config_file)
+ if (
+ not args.multi_gpu
+ and not args.tpu
+ and not args.tpu_use_cluster
+ and not args.use_deepspeed
+ and not args.use_fsdp
+ and not args.use_megatron_lm
+ ):
+ args.use_deepspeed = defaults.distributed_type == DistributedType.DEEPSPEED
+ args.multi_gpu = (
+ True
+ if defaults.distributed_type
+ in (
+ DistributedType.MULTI_GPU,
+ DistributedType.MULTI_NPU,
+ DistributedType.MULTI_MLU,
+ DistributedType.MULTI_XPU,
+ )
+ else False
+ )
+ args.tpu = defaults.distributed_type == DistributedType.XLA
+ args.use_fsdp = defaults.distributed_type == DistributedType.FSDP
+ args.use_megatron_lm = defaults.distributed_type == DistributedType.MEGATRON_LM
+ args.tpu_use_cluster = defaults.tpu_use_cluster if args.tpu else False
+ if args.gpu_ids is None:
+ if defaults.gpu_ids is not None:
+ args.gpu_ids = defaults.gpu_ids
+ else:
+ args.gpu_ids = "all"
+
+ if args.multi_gpu and args.num_machines is None:
+ args.num_machines = defaults.num_machines
+
+ if len(args.gpu_ids.split(",")) < 2 and (args.gpu_ids != "all") and args.multi_gpu and args.num_machines <= 1:
+ raise ValueError(
+ "Less than two GPU ids were configured and tried to run on on multiple GPUs. "
+ "Please ensure at least two are specified for `--gpu_ids`, or use `--gpu_ids='all'`."
+ )
+ if defaults.compute_environment == ComputeEnvironment.LOCAL_MACHINE:
+ # Update args with the defaults
+ for name, attr in defaults.__dict__.items():
+ if isinstance(attr, dict):
+ for k in defaults.deepspeed_config:
+ setattr(args, k, defaults.deepspeed_config[k])
+ for k in defaults.fsdp_config:
+ arg_to_set = k
+ if "fsdp" not in arg_to_set:
+ arg_to_set = "fsdp_" + arg_to_set
+ setattr(args, arg_to_set, defaults.fsdp_config[k])
+ for k in defaults.megatron_lm_config:
+ setattr(args, k, defaults.megatron_lm_config[k])
+ for k in defaults.dynamo_config:
+ setattr(args, k, defaults.dynamo_config[k])
+ for k in defaults.ipex_config:
+ setattr(args, k, defaults.ipex_config[k])
+ for k in defaults.mpirun_config:
+ setattr(args, k, defaults.mpirun_config[k])
+ continue
+
+ # Those args are handled separately
+ if (
+ name not in ["compute_environment", "mixed_precision", "distributed_type"]
+ and getattr(args, name, None) is None
+ ):
+ setattr(args, name, attr)
+ if not args.debug:
+ args.debug = defaults.debug
+
+ if not args.mixed_precision:
+ if defaults.mixed_precision is None:
+ args.mixed_precision = "no"
+ else:
+ args.mixed_precision = defaults.mixed_precision
+ mp_from_config_flag = True
+ else:
+ if args.use_cpu or (args.use_xpu and torch.xpu.is_available()):
+ native_amp = is_torch_version(">=", "1.10")
+ else:
+ native_amp = is_bf16_available(True)
+ if (
+ args.mixed_precision == "bf16"
+ and not native_amp
+ and not (args.tpu and is_torch_xla_available(check_is_tpu=True))
+ ):
+ raise ValueError("bf16 mixed precision requires PyTorch >= 1.10 and a supported device.")
+
+ # Silently set the default here
+ if args.dynamo_backend is None:
+ args.dynamo_backend = "no"
+ else:
+ if args.num_processes is None:
+ if args.use_xpu and is_xpu_available():
+ args.num_processes = torch.xpu.device_count()
+ elif is_mlu_available():
+ args.num_processes = torch.mlu.device_count()
+ elif is_npu_available():
+ args.num_processes = torch.npu.device_count()
+ else:
+ args.num_processes = torch.cuda.device_count()
+ warned.append(f"\t`--num_processes` was set to a value of `{args.num_processes}`")
+ if args.debug is None:
+ args.debug = False
+ if not args.multi_gpu and (
+ (args.use_xpu and is_xpu_available() and torch.xpu.device_count() > 1)
+ or (is_mlu_available() and torch.mlu.device_count() > 1)
+ or (is_npu_available() and torch.npu.device_count() > 1)
+ or (torch.cuda.device_count() > 1)
+ ):
+ warned.append(
+ "\t\tMore than one GPU was found, enabling multi-GPU training.\n"
+ "\t\tIf this was unintended please pass in `--num_processes=1`."
+ )
+ args.multi_gpu = True
+ if args.num_machines is None:
+ warned.append("\t`--num_machines` was set to a value of `1`")
+ args.num_machines = 1
+ if args.mixed_precision is None:
+ warned.append("\t`--mixed_precision` was set to a value of `'no'`")
+ args.mixed_precision = "no"
+ if not hasattr(args, "use_cpu"):
+ args.use_cpu = args.cpu
+ if args.dynamo_backend is None:
+ warned.append("\t`--dynamo_backend` was set to a value of `'no'`")
+ args.dynamo_backend = "no"
+ if args.debug:
+ logger.debug("Running script in debug mode, expect distributed operations to be slightly slower.")
+
+ is_aws_env_disabled = defaults is None or (
+ defaults is not None and defaults.compute_environment != ComputeEnvironment.AMAZON_SAGEMAKER
+ )
+ if is_aws_env_disabled and args.num_cpu_threads_per_process is None:
+ args.num_cpu_threads_per_process = 1
+ if args.use_cpu and args.num_processes >= 1:
+ local_size = get_int_from_env(
+ ["MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1
+ )
+ threads_per_process = int(psutil.cpu_count(logical=False) / local_size)
+ if threads_per_process > 1:
+ args.num_cpu_threads_per_process = threads_per_process
+ warned.append(
+ f"\t`--num_cpu_threads_per_process` was set to `{args.num_cpu_threads_per_process}` to improve out-of-box performance when training on CPUs"
+ )
+
+ if any(warned):
+ message = "The following values were not passed to `accelerate launch` and had defaults used instead:\n"
+ message += "\n".join(warned)
+ message += (
+ "\nTo avoid this warning pass in values for each of the problematic parameters or run `accelerate config`."
+ )
+ logger.warning(message)
+ return args, defaults, mp_from_config_flag
+
+
+def launch_command(args):
+ args, defaults, mp_from_config_flag = _validate_launch_command(args)
+ # Use the proper launcher
+ if args.use_deepspeed and not args.cpu:
+ args.deepspeed_fields_from_accelerate_config = list(defaults.deepspeed_config.keys()) if defaults else []
+ if mp_from_config_flag:
+ args.deepspeed_fields_from_accelerate_config.append("mixed_precision")
+ args.deepspeed_fields_from_accelerate_config = ",".join(args.deepspeed_fields_from_accelerate_config)
+ deepspeed_launcher(args)
+ elif args.use_fsdp and not args.cpu:
+ multi_gpu_launcher(args)
+ elif args.use_megatron_lm and not args.cpu:
+ multi_gpu_launcher(args)
+ elif args.multi_gpu and not args.cpu:
+ multi_gpu_launcher(args)
+ elif args.tpu and not args.cpu:
+ if args.tpu_use_cluster:
+ tpu_pod_launcher(args)
+ else:
+ tpu_launcher(args)
+ elif defaults is not None and defaults.compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
+ sagemaker_launcher(defaults, args)
+ else:
+ simple_launcher(args)
+
+
+def main():
+ parser = launch_command_parser()
+ args = parser.parse_args()
+ launch_command(args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__init__.py b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2c851cc0b192ab8207d3fa68d7409868c84354c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from .selection_menu import BulletMenu
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ecae91ba06e13f0de1cac2bcc8a029f2c4d95b89
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/cursor.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/cursor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..612995181690359ce4be2160e93238a6f949e4ac
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/cursor.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/helpers.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/helpers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..86ad1958e71334c6473c5e2e717bb1d91bd5ef1f
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/helpers.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/input.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/input.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..11c4f011c6230d91fe7f66f21a256d5e44b94b23
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/input.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/keymap.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/keymap.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e64884935121167675f34d588fe9a36784a29d56
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/keymap.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/selection_menu.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/selection_menu.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..79307f3fdfff5227539c5fca75ef22919163eed1
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/__pycache__/selection_menu.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/cursor.py b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/cursor.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1f0bb7b68025ae4fe0c2c76c095eb36b4e64f2c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/cursor.py
@@ -0,0 +1,65 @@
+# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+A utility for showing and hiding the terminal cursor on Windows and Linux, based on https://github.com/bchao1/bullet
+"""
+
+import os
+import sys
+from contextlib import contextmanager
+
+
+# Windows only
+if os.name == "nt":
+ import ctypes
+ import msvcrt # noqa
+
+ class CursorInfo(ctypes.Structure):
+ # _fields is a specific attr expected by ctypes
+ _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
+
+
+def hide_cursor():
+ if os.name == "nt":
+ ci = CursorInfo()
+ handle = ctypes.windll.kernel32.GetStdHandle(-11)
+ ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
+ ci.visible = False
+ ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
+ elif os.name == "posix":
+ sys.stdout.write("\033[?25l")
+ sys.stdout.flush()
+
+
+def show_cursor():
+ if os.name == "nt":
+ ci = CursorInfo()
+ handle = ctypes.windll.kernel32.GetStdHandle(-11)
+ ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
+ ci.visible = True
+ ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
+ elif os.name == "posix":
+ sys.stdout.write("\033[?25h")
+ sys.stdout.flush()
+
+
+@contextmanager
+def hide():
+ "Context manager to hide the terminal cursor"
+ try:
+ hide_cursor()
+ yield
+ finally:
+ show_cursor()
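+# Typical usage (illustrative): wrapping interactive drawing in `with hide(): ...` keeps the
+# cursor hidden for the duration of the block and restores it even if the body raises.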
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/helpers.py b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..de46f37ddcf4591167e3e01791391e4b1729034f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/helpers.py
@@ -0,0 +1,59 @@
+# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+A variety of helper functions and constants when dealing with terminal menu choices, based on
+https://github.com/bchao1/bullet
+"""
+
+import enum
+import shutil
+import sys
+
+
+TERMINAL_WIDTH, _ = shutil.get_terminal_size()
+
+CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}
+
+
+class Direction(enum.Enum):
+ UP = 0
+ DOWN = 1
+
+
+def forceWrite(content, end=""):
+ sys.stdout.write(str(content) + end)
+ sys.stdout.flush()
+
+
+def writeColor(content, color, end=""):
+ forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)
+
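+# For example, `writeColor("done", 32)` emits "\u001b[32mdone\u001b[0m", i.e. "done" in
+# green using the standard ANSI SGR color codes.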
+
+def reset_cursor():
+ forceWrite("\r")
+
+
+def move_cursor(num_lines: int, direction: str):
+ forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")
+
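+# For example, `move_cursor(2, "UP")` emits the ANSI sequence "\033[2A", moving the
+# terminal cursor two lines up.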
+
+def clear_line():
+ forceWrite(" " * TERMINAL_WIDTH)
+ reset_cursor()
+
+
+def linebreak():
+ reset_cursor()
+ forceWrite("-" * TERMINAL_WIDTH)
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/input.py b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/input.py
new file mode 100644
index 0000000000000000000000000000000000000000..2690f86aa61f7ac648f4a9c2040a34ee35147201
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/input.py
@@ -0,0 +1,86 @@
+# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This file contains utilities for handling input from the user and registering specific keys to specific functions,
+based on https://github.com/bchao1/bullet
+"""
+
+from typing import List
+
+from .keymap import KEYMAP, get_character
+
+
+def mark(key: str):
+ """
+ Mark the function with the key code so it can be handled in the register
+ """
+
+ def decorator(func):
+ handle = getattr(func, "handle_key", [])
+ handle += [key]
+ func.handle_key = handle
+ return func
+
+ return decorator
+
+
+def mark_multiple(*keys: List[str]):
+ """
+ Mark the function with the key codes so it can be handled in the register
+ """
+
+ def decorator(func):
+ handle = getattr(func, "handle_key", [])
+ handle += keys
+ func.handle_key = handle
+ return func
+
+ return decorator
+
+
+class KeyHandler(type):
+ """
+ Metaclass that adds the key handlers to the class
+ """
+
+ def __new__(cls, name, bases, attrs):
+ new_cls = super().__new__(cls, name, bases, attrs)
+ if not hasattr(new_cls, "key_handler"):
+ new_cls.key_handler = {}
+ new_cls.handle_input = KeyHandler.handle_input
+
+ for value in attrs.values():
+ handled_keys = getattr(value, "handle_key", [])
+ for key in handled_keys:
+ new_cls.key_handler[key] = value
+ return new_cls
+
+ @staticmethod
+ def handle_input(cls):
+ "Finds and returns the selected character if it exists in the handler"
+ char = get_character()
+ if char != KEYMAP["undefined"]:
+ char = ord(char)
+ handler = cls.key_handler.get(char)
+ if handler:
+ cls.current_selection = char
+ return handler(cls)
+ else:
+ return None
+
+
+def register(cls):
+ """Adds KeyHandler metaclass to the class"""
+ return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
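+# Minimal illustration (hypothetical class, shown only as a sketch):
+#
+#   @register
+#   class Demo:
+#       @mark(KEYMAP["newline"])
+#       def on_enter(self):
+#           return "enter pressed"
+#
+# `Demo().handle_input()` blocks on `get_character()` and dispatches to `on_enter` when the
+# Enter key is pressed, returning "enter pressed"; unhandled keys return `None`.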
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/keymap.py b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/keymap.py
new file mode 100644
index 0000000000000000000000000000000000000000..787db12860fe21c6786dda69c34fcccab114f2f8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/keymap.py
@@ -0,0 +1,133 @@
+# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Utilities relating to parsing raw characters from the keyboard, based on https://github.com/bchao1/bullet
+"""
+
+import os
+import string
+import sys
+
+
+ARROW_KEY_FLAG = 1 << 8
+
+KEYMAP = {
+ "tab": ord("\t"),
+ "newline": ord("\r"),
+ "esc": 27,
+ "up": 65 + ARROW_KEY_FLAG,
+ "down": 66 + ARROW_KEY_FLAG,
+ "right": 67 + ARROW_KEY_FLAG,
+ "left": 68 + ARROW_KEY_FLAG,
+ "mod_int": 91,
+ "undefined": sys.maxsize,
+ "interrupt": 3,
+ "insert": 50,
+ "delete": 51,
+ "pg_up": 53,
+ "pg_down": 54,
+}
+
+KEYMAP["arrow_begin"] = KEYMAP["up"]
+KEYMAP["arrow_end"] = KEYMAP["left"]
+
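+# Arrow keys arrive as the escape sequence ESC [ A..D, so the codes above are the final
+# byte (65-68) offset by ARROW_KEY_FLAG to keep them distinct from the printable
+# characters "A".."D" (see `get_character` below).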
+if sys.platform == "win32":
+ WIN_CH_BUFFER = []
+ WIN_KEYMAP = {
+ b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
+ b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
+ b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
+ b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
+ b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
+ b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
+ b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
+ b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
+ }
+
+for i in range(10):
+ KEYMAP[str(i)] = ord(str(i))
+
+
+def get_raw_chars():
+ "Gets raw characters from inputs"
+ if os.name == "nt":
+ import msvcrt
+
+ encoding = "mbcs"
+ # Flush the keyboard buffer
+ while msvcrt.kbhit():
+ msvcrt.getch()
+ if len(WIN_CH_BUFFER) == 0:
+ # Read the keystroke
+ ch = msvcrt.getch()
+
+ # If it is a prefix char, get second part
+ if ch in (b"\x00", b"\xe0"):
+ ch2 = ch + msvcrt.getch()
+ # Translate actual Win chars to bullet char types
+ try:
+ chx = chr(WIN_KEYMAP[ch2])
+ WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
+ WIN_CH_BUFFER.append(chx)
+ if ord(chx) in (
+ KEYMAP["insert"] - 1 << 9,
+ KEYMAP["delete"] - 1 << 9,
+ KEYMAP["pg_up"] - 1 << 9,
+ KEYMAP["pg_down"] - 1 << 9,
+ ):
+ WIN_CH_BUFFER.append(chr(126))
+ ch = chr(KEYMAP["esc"])
+ except KeyError:
+ ch = ch2[1]
+ else:
+ ch = ch.decode(encoding)
+ else:
+ ch = WIN_CH_BUFFER.pop(0)
+ elif os.name == "posix":
+ import termios
+ import tty
+
+ fd = sys.stdin.fileno()
+ old_settings = termios.tcgetattr(fd)
+ try:
+ tty.setraw(fd)
+ ch = sys.stdin.read(1)
+ finally:
+ termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+ return ch
+
+
+def get_character():
+ "Gets a character from the keyboard and returns the key code"
+ char = get_raw_chars()
+ if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
+ return char
+
+ elif ord(char) == KEYMAP["esc"]:
+ combo = get_raw_chars()
+ if ord(combo) == KEYMAP["mod_int"]:
+ key = get_raw_chars()
+ if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
+ return chr(ord(key) + ARROW_KEY_FLAG)
+ else:
+ return KEYMAP["undefined"]
+ else:
+ return get_raw_chars()
+
+ else:
+ if char in string.printable:
+ return char
+ else:
+ return KEYMAP["undefined"]
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/selection_menu.py b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/selection_menu.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee9a771a54ef666ee46b67ae6c75fb957d49efdd
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/menu/selection_menu.py
@@ -0,0 +1,144 @@
+# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Main driver for the selection menu, based on https://github.com/bchao1/bullet
+"""
+
+import builtins
+import sys
+
+from ...utils.imports import _is_package_available
+from . import cursor, input
+from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
+from .keymap import KEYMAP
+
+
+in_colab = False
+try:
+ in_colab = _is_package_available("google.colab")
+except ModuleNotFoundError:
+ pass
+
+
+@input.register
+class BulletMenu:
+ """
+ A CLI menu to select a choice from a list of choices using the keyboard.
+ """
+
+ def __init__(self, prompt: str = None, choices: list = []):
+ self.position = 0
+ self.choices = choices
+ self.prompt = prompt
+ if sys.platform == "win32":
+ self.arrow_char = "*"
+ else:
+ self.arrow_char = "➔ "
+
+ def write_choice(self, index, end: str = ""):
+ if sys.platform != "win32":
+ writeColor(self.choices[index], 32, end)
+ else:
+ forceWrite(self.choices[index], end)
+
+ def print_choice(self, index: int):
+ "Prints the choice at the given index"
+ if index == self.position:
+ forceWrite(f" {self.arrow_char} ")
+ self.write_choice(index)
+ else:
+ forceWrite(f" {self.choices[index]}")
+ reset_cursor()
+
+ def move_direction(self, direction: Direction, num_spaces: int = 1):
+ "Should not be directly called, used to move a direction of either up or down"
+ old_position = self.position
+ if direction == Direction.DOWN:
+ if self.position + 1 >= len(self.choices):
+ return
+ self.position += num_spaces
+ else:
+ if self.position - 1 < 0:
+ return
+ self.position -= num_spaces
+ clear_line()
+ self.print_choice(old_position)
+ move_cursor(num_spaces, direction.name)
+ self.print_choice(self.position)
+
+ @input.mark(KEYMAP["up"])
+ def move_up(self):
+ self.move_direction(Direction.UP)
+
+ @input.mark(KEYMAP["down"])
+ def move_down(self):
+ self.move_direction(Direction.DOWN)
+
+ @input.mark(KEYMAP["newline"])
+ def select(self):
+ move_cursor(len(self.choices) - self.position, "DOWN")
+ return self.position
+
+ @input.mark(KEYMAP["interrupt"])
+ def interrupt(self):
+ move_cursor(len(self.choices) - self.position, "DOWN")
+ raise KeyboardInterrupt
+
+ @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
+ def select_row(self):
+ index = int(chr(self.current_selection))
+ movement = index - self.position
+ if index == self.position:
+ return
+ if index < len(self.choices):
+ if self.position > index:
+ self.move_direction(Direction.UP, -movement)
+ elif self.position < index:
+ self.move_direction(Direction.DOWN, movement)
+ else:
+ return
+ else:
+ return
+
+ def run(self, default_choice: int = 0):
+ "Start the menu and return the selected choice"
+ if self.prompt:
+ linebreak()
+ forceWrite(self.prompt, "\n")
+ if in_colab:
+ forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
+ else:
+ forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
+ self.position = default_choice
+ for i in range(len(self.choices)):
+ self.print_choice(i)
+ forceWrite("\n")
+ move_cursor(len(self.choices) - self.position, "UP")
+ with cursor.hide():
+ while True:
+ if in_colab:
+ try:
+ choice = int(builtins.input())
+ except ValueError:
+ choice = default_choice
+ else:
+ choice = self.handle_input()
+ if choice is not None:
+ reset_cursor()
+ for _ in range(len(self.choices) + 1):
+ move_cursor(1, "UP")
+ clear_line()
+ self.write_choice(choice, "\n")
+ return choice
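Putting the pieces together, a `BulletMenu` is constructed from a prompt and a list of choices, and `run()` blocks until the user confirms a selection, returning its index. A minimal sketch with made-up prompt and choices:

from accelerate.commands.menu.selection_menu import BulletMenu

menu = BulletMenu("Which mixed precision mode?", ["no", "fp16", "bf16"])
index = menu.run(default_choice=0)  # e.g. 1 if the user selects "fp16"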
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/commands/test.py b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0d2f7bcf14727aa13e3438f4cd6e6f140f5bb2f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/test.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+
+from accelerate.test_utils import execute_subprocess_async, path_in_accelerate_package
+
+
+def test_command_parser(subparsers=None):
+ if subparsers is not None:
+ parser = subparsers.add_parser("test")
+ else:
+ parser = argparse.ArgumentParser("Accelerate test command")
+
+ parser.add_argument(
+ "--config_file",
+ default=None,
+ help=(
+ "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
+ "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
+ "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
+ "with 'huggingface'."
+ ),
+ )
+
+ if subparsers is not None:
+ parser.set_defaults(func=test_command)
+ return parser
+
+
+def test_command(args):
+ script_name = path_in_accelerate_package("test_utils", "scripts", "test_script.py")
+
+ if args.config_file is None:
+ test_args = [script_name]
+ else:
+ test_args = f"--config_file={args.config_file} {script_name}".split()
+
+ cmd = ["accelerate-launch"] + test_args
+ result = execute_subprocess_async(cmd)
+ if result.returncode == 0:
+ print("Test is a success! You are ready for your distributed training!")
+
+
+def main():
+ parser = test_command_parser()
+ args = parser.parse_args()
+ test_command(args)
+
+
+if __name__ == "__main__":
+ main()
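The parser can also be built standalone; `test_command` then composes and runs an `accelerate-launch` invocation of the bundled `test_script.py`. A small sketch of the parsing step (the config path is hypothetical):

from accelerate.commands.test import test_command_parser

parser = test_command_parser()
args = parser.parse_args(["--config_file", "my_config.yaml"])
# test_command(args) would then run:
#   accelerate-launch --config_file=my_config.yaml <path to test_utils/scripts/test_script.py>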
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/commands/tpu.py b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/tpu.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc0f07bf8697bfdb6484d3bf817f2e18b1313b00
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/tpu.py
@@ -0,0 +1,157 @@
+#!/usr/bin/env python
+
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import os
+import subprocess
+
+from packaging.version import Version, parse
+
+from accelerate.commands.config.config_args import default_config_file, load_config_from_file
+
+
+_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
+
+
+def tpu_command_parser(subparsers=None):
+ if subparsers is not None:
+ parser = subparsers.add_parser("tpu-config", description=_description)
+ else:
+ parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
+ # Core arguments
+ config_args = parser.add_argument_group(
+ "Config Arguments", "Arguments that can be configured through `accelerate config`."
+ )
+ config_args.add_argument(
+ "--config_file",
+ type=str,
+ default=None,
+ help="Path to the config file to use for accelerate.",
+ )
+ config_args.add_argument(
+ "--tpu_name",
+ default=None,
+ help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
+ )
+ config_args.add_argument(
+ "--tpu_zone",
+ default=None,
+ help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
+ )
+ pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options run inside the TPU.")
+ pod_args.add_argument(
+ "--use_alpha",
+ action="store_true",
+ help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
+ )
+ pod_args.add_argument(
+ "--command_file",
+ default=None,
+ help="The path to the file containing the commands to run on the pod on startup.",
+ )
+ pod_args.add_argument(
+ "--command",
+ action="append",
+ nargs="+",
+ help="A command to run on the pod. Can be passed multiple times.",
+ )
+ pod_args.add_argument(
+ "--install_accelerate",
+ action="store_true",
+ help="Whether to install accelerate on the pod. Defaults to False.",
+ )
+ pod_args.add_argument(
+ "--accelerate_version",
+ default="latest",
+ help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
+ )
+ pod_args.add_argument(
+ "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
+ )
+
+ if subparsers is not None:
+ parser.set_defaults(func=tpu_command_launcher)
+ return parser
+
+
+def tpu_command_launcher(args):
+ defaults = None
+
+ # Get the default from the config file if it exists.
+ if args.config_file is not None or os.path.isfile(default_config_file):
+ defaults = load_config_from_file(args.config_file)
+ if not args.command_file and defaults.command_file is not None and not args.command:
+ args.command_file = defaults.command_file
+ if not args.command and defaults.commands is not None:
+ args.command = defaults.commands
+ if not args.tpu_name:
+ args.tpu_name = defaults.tpu_name
+ if not args.tpu_zone:
+ args.tpu_zone = defaults.tpu_zone
+ if args.accelerate_version == "dev":
+ args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
+ elif args.accelerate_version == "latest":
+ args.accelerate_version = "accelerate -U"
+ elif isinstance(parse(args.accelerate_version), Version):
+ args.accelerate_version = f"accelerate=={args.accelerate_version}"
+
+ if not args.command_file and not args.command:
+ raise ValueError("You must specify either a command file or a command to run on the pod.")
+
+ if args.command_file:
+ with open(args.command_file) as f:
+ args.command = [f.read().splitlines()]
+
+ # To turn list of lists into list of strings
+ if isinstance(args.command[0], list):
+ args.command = [line for cmd in args.command for line in cmd]
+ # Default to the shared folder and install accelerate
+ new_cmd = ["cd /usr/share"]
+ if args.install_accelerate:
+ new_cmd += [f"pip install {args.accelerate_version}"]
+ new_cmd += args.command
+ args.command = "; ".join(new_cmd)
+
+ # Then send it to gcloud
+ # Eventually try to use google-api-core to do this instead of subprocess
+ cmd = ["gcloud"]
+ if args.use_alpha:
+ cmd += ["alpha"]
+ cmd += [
+ "compute",
+ "tpus",
+ "tpu-vm",
+ "ssh",
+ args.tpu_name,
+ "--zone",
+ args.tpu_zone,
+ "--command",
+ args.command,
+ "--worker",
+ "all",
+ ]
+ if args.debug:
+ print(f"Running {' '.join(cmd)}")
+ return
+ subprocess.run(cmd)
+ print("Successfully setup pod.")
+
+
+def main():
+ parser = tpu_command_parser()
+ args = parser.parse_args()
+
+ tpu_command_launcher(args)
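With `--debug`, `tpu_command_launcher` only prints the `gcloud compute tpus tpu-vm ssh` command it would run, which makes the flow easy to inspect. A minimal sketch, assuming no default accelerate config file is present and using made-up TPU name and zone:

from accelerate.commands.tpu import tpu_command_parser, tpu_command_launcher

parser = tpu_command_parser()
args = parser.parse_args([
    "--tpu_name", "my-tpu",
    "--tpu_zone", "us-central1-a",
    "--command", "echo hello",
    "--debug",
])
tpu_command_launcher(args)  # prints the gcloud command instead of executing it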
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/commands/utils.py b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..b65215fac7666b475af98b17e264ef6701239bc1
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/commands/utils.py
@@ -0,0 +1,120 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+
+
+class _StoreAction(argparse.Action):
+ """
+ Custom action that allows for `-` or `_` to be passed in for an argument.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ new_option_strings = []
+ for option_string in self.option_strings:
+ new_option_strings.append(option_string)
+ if "_" in option_string[2:]:
+ # Add `-` version to the option string
+ new_option_strings.append(option_string.replace("_", "-"))
+ self.option_strings = new_option_strings
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ setattr(namespace, self.dest, values)
+
+
+class _StoreConstAction(_StoreAction):
+ """
+ Same as `argparse._StoreConstAction` but uses the custom `_StoreAction`.
+ """
+
+ def __init__(self, option_strings, dest, const, default=None, required=False, help=None):
+ super().__init__(
+ option_strings=option_strings,
+ dest=dest,
+ nargs=0,
+ const=const,
+ default=default,
+ required=required,
+ help=help,
+ )
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ setattr(namespace, self.dest, self.const)
+
+
+class _StoreTrueAction(_StoreConstAction):
+ """
+ Same as `argparse._StoreTrueAction` but uses the custom `_StoreConstAction`.
+ """
+
+ def __init__(
+ self,
+ option_strings,
+ dest,
+ default=None,
+ required=False,
+ help=None,
+ ):
+ super().__init__(
+ option_strings=option_strings, dest=dest, const=True, default=default, required=required, help=help
+ )
+
+
+class CustomArgumentGroup(argparse._ArgumentGroup):
+ """
+ Custom argument group that allows for the use of `-` or `_` in arguments passed and overrides the help for each
+ when applicable.
+ """
+
+ def _add_action(self, action):
+ args = vars(action)
+ if isinstance(action, argparse._StoreTrueAction):
+ action = _StoreTrueAction(
+ args["option_strings"], args["dest"], args["default"], args["required"], args["help"]
+ )
+ elif isinstance(action, argparse._StoreConstAction):
+ action = _StoreConstAction(
+ args["option_strings"],
+ args["dest"],
+ args["const"],
+ args["default"],
+ args["required"],
+ args["help"],
+ )
+ elif isinstance(action, argparse._StoreAction):
+ action = _StoreAction(**args)
+ action = super()._add_action(action)
+ return action
+
+
+class CustomArgumentParser(argparse.ArgumentParser):
+ """
+ Custom argument parser that allows for the use of `-` or `_` in arguments passed and overrides the help for each
+ when applicable.
+ """
+
+ def add_argument(self, *args, **kwargs):
+ if "action" in kwargs:
+ # Translate action -> class
+ if kwargs["action"] == "store_true":
+ kwargs["action"] = _StoreTrueAction
+ else:
+ kwargs["action"] = _StoreAction
+ super().add_argument(*args, **kwargs)
+
+ def add_argument_group(self, *args, **kwargs):
+ group = CustomArgumentGroup(self, *args, **kwargs)
+ self._action_groups.append(group)
+ return group
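The net effect is that an argument registered with underscores also accepts the dashed spelling (and vice versa), as long as it is added through `CustomArgumentParser.add_argument` with an explicit `action`. A minimal sketch with made-up option names:

from accelerate.commands.utils import CustomArgumentParser

parser = CustomArgumentParser()
parser.add_argument("--num_processes", action="store", type=int, default=1)
parser.add_argument("--use_cpu", action="store_true")

# Both spellings resolve to the same destinations.
args = parser.parse_args(["--num-processes", "2", "--use-cpu"])
print(args.num_processes, args.use_cpu)  # 2 True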
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/data_loader.py b/env-llmeval/lib/python3.10/site-packages/accelerate/data_loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..0764e0971a3845d04dc1c7fc500d0c06f67d2c0e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/data_loader.py
@@ -0,0 +1,1093 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+from contextlib import suppress
+from typing import Callable, List, Optional, Union
+
+import torch
+from torch.utils.data import BatchSampler, DataLoader, IterableDataset, RandomSampler
+
+from .logging import get_logger
+from .state import AcceleratorState, DistributedType, GradientState, is_torch_xla_available
+from .utils import (
+ RNGType,
+ broadcast,
+ broadcast_object_list,
+ concatenate,
+ find_batch_size,
+ get_data_structure,
+ initialize_tensors,
+ is_torch_version,
+ send_to_device,
+ slice_tensors,
+ synchronize_rng_states,
+)
+
+
+logger = get_logger(__name__)
+
+# kwargs of the DataLoader in min version 1.4.0.
+_PYTORCH_DATALOADER_KWARGS = {
+ "batch_size": 1,
+ "shuffle": False,
+ "sampler": None,
+ "batch_sampler": None,
+ "num_workers": 0,
+ "collate_fn": None,
+ "pin_memory": False,
+ "drop_last": False,
+ "timeout": 0,
+ "worker_init_fn": None,
+ "multiprocessing_context": None,
+ "generator": None,
+ "prefetch_factor": 2,
+ "persistent_workers": False,
+}
+
+# kwargs added after by version
+_PYTORCH_DATALOADER_ADDITIONAL_KWARGS = {}
+
+for v, additional_kwargs in _PYTORCH_DATALOADER_ADDITIONAL_KWARGS.items():
+ if is_torch_version(">=", v):
+ _PYTORCH_DATALOADER_KWARGS.update(additional_kwargs)
+
+
+class SeedableRandomSampler(RandomSampler):
+ """
+ Same as a random sampler, except that in `__iter__` a seed can be used.
+
+ Needed specifically in distributed cases, when the random generator for each GPU needs to start from the same seed
+ and be fully reproducible on multiple iterations.
+
+ If a custom `generator` is passed, it will rely on its initial seed as well as the current iteration it is on
+ (stored in `self.epoch`).
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.epoch = 0
+ self.initial_seed = torch.random.initial_seed()
+
+ def __iter__(self):
+ if self.generator is None:
+ self.generator = torch.Generator()
+ self.generator.manual_seed(self.initial_seed)
+
+ # Allow `self.epoch` to modify the seed of the generator
+ seed = self.epoch + self.initial_seed
+ # print("Setting seed at epoch", self.epoch, seed)
+ self.generator.manual_seed(seed)
+ yield from super().__iter__()
+ self.set_epoch(self.epoch + 1)
+
+ def set_epoch(self, epoch: int):
+ "Sets the current iteration of the sampler."
+ self.epoch = epoch
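Since the seed is derived from `initial_seed + epoch`, re-running an epoch with the same epoch value reproduces the same permutation. A minimal sketch on a toy data source:

import torch
from accelerate.data_loader import SeedableRandomSampler

torch.manual_seed(0)
sampler = SeedableRandomSampler(range(8))
first = list(sampler)   # iterating bumps the internal epoch to 1 afterwards
sampler.set_epoch(0)
second = list(sampler)  # same permutation as `first`, since the seed is identical
assert first == second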
+
+
+class BatchSamplerShard(BatchSampler):
+ """
+ Wraps a PyTorch `BatchSampler` to generate batches for one of the processes only. Instances of this class will
+ always yield a number of batches that is a round multiple of `num_processes` and that all have the same size.
+ Depending on the value of the `drop_last` attribute of the batch sampler passed, it will either stop the iteration
+ at the first batch that would be too small / not present on all processes or loop with indices from the beginning.
+
+ Args:
+ batch_sampler (`torch.utils.data.sampler.BatchSampler`):
+ The batch sampler to split into several shards.
+ num_processes (`int`, *optional*, defaults to 1):
+ The number of processes running concurrently.
+ process_index (`int`, *optional*, defaults to 0):
+ The index of the current process.
+ split_batches (`bool`, *optional*, defaults to `False`):
+ Whether the shards should be created by splitting a batch to give a piece of it on each process, or by
+ yielding different full batches on each process.
+
+ On two processes with a sampler of `[[0, 1, 2, 3], [4, 5, 6, 7]]`, this will result in:
+
+ - the sampler on process 0 to yield `[0, 1, 2, 3]` and the sampler on process 1 to yield `[4, 5, 6, 7]` if
+ this argument is set to `False`.
+ - the sampler on process 0 to yield `[0, 1]` then `[4, 5]` and the sampler on process 1 to yield `[2, 3]`
+ then `[6, 7]` if this argument is set to `True`.
+ even_batches (`bool`, *optional*, defaults to `True`):
+ Whether or not to loop back at the beginning of the sampler when the number of samples is not a round
+ multiple of (original batch size / number of processes).
+
+
+
+ `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches`
+ equal to `False`
+
+ """
+
+ def __init__(
+ self,
+ batch_sampler: BatchSampler,
+ num_processes: int = 1,
+ process_index: int = 0,
+ split_batches: bool = False,
+ even_batches: bool = True,
+ ):
+ if split_batches and batch_sampler.batch_size % num_processes != 0:
+ raise ValueError(
+ f"To use `BatchSamplerShard` in `split_batches` mode, the batch size ({batch_sampler.batch_size}) "
+ f"needs to be a round multiple of the number of processes ({num_processes})."
+ )
+ self.batch_sampler = batch_sampler
+ self.num_processes = num_processes
+ self.process_index = process_index
+ self.split_batches = split_batches
+ self.even_batches = even_batches
+ self.batch_size = getattr(batch_sampler, "batch_size", None)
+ self.drop_last = getattr(batch_sampler, "drop_last", False)
+ if self.batch_size is None and self.even_batches:
+ raise ValueError(
+ "You need to use `even_batches=False` when the batch sampler has no batch size. If you "
+ "are not calling this method directly, set `accelerator.even_batches=False` instead."
+ )
+
+ @property
+ def total_length(self):
+ return len(self.batch_sampler)
+
+ def __len__(self):
+ if self.split_batches:
+ # Split batches does not change the length of the batch sampler
+ return len(self.batch_sampler)
+ if len(self.batch_sampler) % self.num_processes == 0:
+ # If the length is a round multiple of the number of processes, it's easy.
+ return len(self.batch_sampler) // self.num_processes
+ length = len(self.batch_sampler) // self.num_processes
+ if self.drop_last:
+ # Same if we drop the remainder.
+ return length
+ elif self.even_batches:
+ # When even_batches is enabled we always get +1
+ return length + 1
+ else:
+ # Otherwise it depends on the process index.
+ return length + 1 if self.process_index < len(self.batch_sampler) % self.num_processes else length
+
+ def __iter__(self):
+ return self._iter_with_split() if self.split_batches else self._iter_with_no_split()
+
+ def _iter_with_split(self):
+ initial_data = []
+ batch_length = self.batch_sampler.batch_size // self.num_processes
+ for idx, batch in enumerate(self.batch_sampler):
+ if idx == 0:
+ initial_data = batch
+ if len(batch) == self.batch_size:
+ # If the batch is full, we yield the part of it this process is responsible for.
+ yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]
+
+ # If drop_last is True or the last batch was full, iteration is over, otherwise...
+ if not self.drop_last and len(initial_data) > 0 and len(batch) < self.batch_size:
+ if not self.even_batches:
+ if len(batch) > batch_length * self.process_index:
+ yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]
+ else:
+ # For degenerate cases where the dataset has fewer than num_processes * batch_size samples
+ while len(initial_data) < self.batch_size:
+ initial_data += initial_data
+ batch = batch + initial_data
+ yield batch[batch_length * self.process_index : batch_length * (self.process_index + 1)]
+
+ def _iter_with_no_split(self):
+ initial_data = []
+ batch_to_yield = []
+ for idx, batch in enumerate(self.batch_sampler):
+ # We gather the initial indices in case we need to circle back at the end.
+ if not self.drop_last and idx < self.num_processes:
+ initial_data += batch
+ # We identify the batch to yield but wait until we are sure every process gets a full batch before actually
+ # yielding it.
+ if idx % self.num_processes == self.process_index:
+ batch_to_yield = batch
+ if idx % self.num_processes == self.num_processes - 1 and (
+ self.batch_size is None or len(batch) == self.batch_size
+ ):
+ yield batch_to_yield
+ batch_to_yield = []
+
+ # If drop_last is True, iteration is over, otherwise...
+ if not self.drop_last and len(initial_data) > 0:
+ if not self.even_batches:
+ if len(batch_to_yield) > 0:
+ yield batch_to_yield
+ else:
+ # ... we yield the complete batch we had saved before if it has the proper length
+ if len(batch_to_yield) == self.batch_size:
+ yield batch_to_yield
+
+ # For degenerate cases where the dataset has fewer than num_processes * batch_size samples
+ while len(initial_data) < self.num_processes * self.batch_size:
+ initial_data += initial_data
+
+ # If the last batch seen was of the proper size, it has been yielded by its process so we move to the next
+ if len(batch) == self.batch_size:
+ batch = []
+ idx += 1
+
+ # Make sure we yield a multiple of self.num_processes batches
+ cycle_index = 0
+ while idx % self.num_processes != 0 or len(batch) > 0:
+ end_index = cycle_index + self.batch_size - len(batch)
+ batch += initial_data[cycle_index:end_index]
+ if idx % self.num_processes == self.process_index:
+ yield batch
+ cycle_index = end_index
+ batch = []
+ idx += 1
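A minimal sketch of the sharding behaviour on a toy batch sampler, matching the `split_batches=False` case described in the docstring above (two processes, batch size 4):

from torch.utils.data import BatchSampler, SequentialSampler
from accelerate.data_loader import BatchSamplerShard

batch_sampler = BatchSampler(SequentialSampler(range(16)), batch_size=4, drop_last=False)
shard0 = BatchSamplerShard(batch_sampler, num_processes=2, process_index=0)
shard1 = BatchSamplerShard(batch_sampler, num_processes=2, process_index=1)
print(list(shard0))  # [[0, 1, 2, 3], [8, 9, 10, 11]]
print(list(shard1))  # [[4, 5, 6, 7], [12, 13, 14, 15]]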
+
+
+class IterableDatasetShard(IterableDataset):
+ """
+ Wraps a PyTorch `IterableDataset` to generate samples for one of the processes only. Instances of this class will
+ always yield a number of samples that is a round multiple of the actual batch size (depending on the value of
+ `split_batches`, this is either `batch_size` or `batch_size x num_processes`). Depending on the value of the
+ `drop_last` attribute of the batch sampler passed, it will either stop the iteration at the first batch that would
+ be too small or loop with indices from the beginning.
+
+ Args:
+ dataset (`torch.utils.data.dataset.IterableDataset`):
+ The iterable dataset to split into several shards.
+ batch_size (`int`, *optional*, defaults to 1):
+ The size of the batches per shard (if `split_batches=False`) or the size of the batches (if
+ `split_batches=True`).
+ drop_last (`bool`, *optional*, defaults to `False`):
+ Whether or not to drop the last incomplete batch or complete the last batches by using the samples from the
+ beginning.
+ num_processes (`int`, *optional*, defaults to 1):
+ The number of processes running concurrently.
+ process_index (`int`, *optional*, defaults to 0):
+ The index of the current process.
+ split_batches (`bool`, *optional*, defaults to `False`):
+ Whether the shards should be created by splitting a batch to give a piece of it on each process, or by
+ yielding different full batches on each process.
+
+ On two processes with an iterable dataset yielding of `[0, 1, 2, 3, 4, 5, 6, 7]`, this will result in:
+
+ - the shard on process 0 to yield `[0, 1, 2, 3]` and the shard on process 1 to yield `[4, 5, 6, 7]` if this
+ argument is set to `False`.
+ - the shard on process 0 to yield `[0, 1, 4, 5]` and the shard on process 1 to yield `[2, 3, 6, 7]` if
+ this argument is set to `True`.
+ """
+
+ def __init__(
+ self,
+ dataset: IterableDataset,
+ batch_size: int = 1,
+ drop_last: bool = False,
+ num_processes: int = 1,
+ process_index: int = 0,
+ split_batches: bool = False,
+ ):
+ if split_batches and batch_size > 1 and batch_size % num_processes != 0:
+ raise ValueError(
+ f"To use `IterableDatasetShard` in `split_batches` mode, the batch size ({batch_size}) "
+ f"needs to be a round multiple of the number of processes ({num_processes})."
+ )
+ self.dataset = dataset
+ self.batch_size = batch_size
+ self.drop_last = drop_last
+ self.num_processes = num_processes
+ self.process_index = process_index
+ self.split_batches = split_batches
+
+ def set_epoch(self, epoch):
+ self.epoch = epoch
+ if hasattr(self.dataset, "set_epoch"):
+ self.dataset.set_epoch(epoch)
+
+ def __len__(self):
+ # We will just raise the downstream error if the underlying dataset is not sized
+ if self.drop_last:
+ return (len(self.dataset) // (self.batch_size * self.num_processes)) * self.batch_size
+ else:
+ return math.ceil(len(self.dataset) / (self.batch_size * self.num_processes)) * self.batch_size
+
+ def __iter__(self):
+ if (
+ not hasattr(self.dataset, "set_epoch")
+ and hasattr(self.dataset, "generator")
+ and isinstance(self.dataset.generator, torch.Generator)
+ ):
+ self.dataset.generator.manual_seed(self.epoch)
+ real_batch_size = self.batch_size if self.split_batches else (self.batch_size * self.num_processes)
+ process_batch_size = (self.batch_size // self.num_processes) if self.split_batches else self.batch_size
+ process_slice = range(self.process_index * process_batch_size, (self.process_index + 1) * process_batch_size)
+
+ first_batch = None
+ current_batch = []
+ for element in self.dataset:
+ current_batch.append(element)
+ # Wait to have a full batch before yielding elements.
+ if len(current_batch) == real_batch_size:
+ for i in process_slice:
+ yield current_batch[i]
+ if first_batch is None:
+ first_batch = current_batch.copy()
+ current_batch = []
+
+ # Finished if drop_last is True, otherwise complete the last batch with elements from the beginning.
+ if not self.drop_last and len(current_batch) > 0:
+ if first_batch is None:
+ first_batch = current_batch.copy()
+ while len(current_batch) < real_batch_size:
+ current_batch += first_batch
+ for i in process_slice:
+ yield current_batch[i]
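A minimal sketch with a toy iterable dataset: with `split_batches=False` the real batch size is `batch_size * num_processes`, and each process keeps its own slice of every such group.

from torch.utils.data import IterableDataset
from accelerate.data_loader import IterableDatasetShard

class ToyIterable(IterableDataset):
    def __iter__(self):
        yield from range(8)

shard = IterableDatasetShard(ToyIterable(), batch_size=2, num_processes=2, process_index=1)
print(list(shard))  # [2, 3, 6, 7]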
+
+
+class DataLoaderStateMixin:
+ """
+ Mixin class that adds a state to a `DataLoader` to keep track of the status inside the dataloader, such as whether
+ the end of the iteration has been reached, the number of items from the dataset in the last batch relative to the
+ batch size, and other useful information that might be needed.
+
+ **Available attributes:**
+
+ - **end_of_dataloader** (`bool`) -- Whether at the last iteration or batch
+ - **remainder** (`int`) -- The number of items that are remaining in the last batch, relative to the total
+ batch size
+
+ """
+
+ def __init_subclass__(cls, **kwargs):
+ cls.end_of_dataloader = False
+ cls.remainder = -1
+
+ def reset(self):
+ self.end_of_dataloader = False
+ self.remainder = -1
+
+ def begin(self):
+ "Prepares the gradient state for the current dataloader"
+ self.reset()
+ with suppress(Exception):
+ if not self._drop_last:
+ length = getattr(self.dataset, "total_dataset_length", len(self.dataset))
+ self.remainder = length % self.total_batch_size
+ self.gradient_state._add_dataloader(self)
+
+ def end(self):
+ "Cleans up the gradient state after exiting the dataloader"
+ self.gradient_state._remove_dataloader(self)
+
+
+class DataLoaderShard(DataLoader, DataLoaderStateMixin):
+ """
+ Subclass of a PyTorch `DataLoader` that will deal with device placement and current distributed setup.
+
+ Args:
+ dataset (`torch.utils.data.dataset.Dataset`):
+ The dataset to use to build this dataloader.
+ device (`torch.device`, *optional*):
+ If passed, the device to put all batches on.
+ rng_types (list of `str` or [`~utils.RNGType`]):
+ The list of random number generators to synchronize at the beginning of each iteration. Should be one or
+ several of:
+
+ - `"torch"`: the base torch random number generator
+ - `"cuda"`: the CUDA random number generator (GPU only)
+ - `"xla"`: the XLA random number generator (TPU only)
+ - `"generator"`: an optional `torch.Generator`
+ synchronized_generator (`torch.Generator`, *optional*):
+ A random number generator to keep synchronized across processes.
+ skip_batches (`int`, *optional*, defaults to 0):
+ The number of batches to skip at the beginning.
+ **kwargs (additional keyword arguments, *optional*):
+ All other keyword arguments to pass to the regular `DataLoader` initialization.
+
+ **Available attributes:**
+
+ - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.
+ Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total
+ number of processes
+
+ - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
+ """
+
+ def __init__(
+ self,
+ dataset,
+ device=None,
+ rng_types=None,
+ synchronized_generator=None,
+ skip_batches=0,
+ _drop_last: bool = False,
+ **kwargs,
+ ):
+ super().__init__(dataset, **kwargs)
+ self.device = device
+ self.rng_types = rng_types
+ self.synchronized_generator = synchronized_generator
+ self.skip_batches = skip_batches
+ self.gradient_state = GradientState()
+ self._drop_last = _drop_last
+ self.iteration = 0
+
+ def __iter__(self):
+ if self.rng_types is not None:
+ synchronize_rng_states(self.rng_types, self.synchronized_generator)
+ self.begin()
+
+ self.set_epoch(self.iteration)
+ dataloader_iter = super().__iter__()
+ # We iterate one batch ahead to check when we are at the end
+ try:
+ current_batch = next(dataloader_iter)
+ except StopIteration:
+ yield
+
+ batch_index = 0
+ while True:
+ try:
+ # But we still move it to the device so it is done before `StopIteration` is reached
+ if self.device is not None:
+ current_batch = send_to_device(current_batch, self.device)
+ next_batch = next(dataloader_iter)
+ if batch_index >= self.skip_batches:
+ yield current_batch
+ batch_index += 1
+ current_batch = next_batch
+ except StopIteration:
+ self.end_of_dataloader = True
+ if batch_index >= self.skip_batches:
+ yield current_batch
+ break
+
+ self.iteration += 1
+ self.end()
+
+ def set_epoch(self, epoch: int):
+ # In case it is manually passed in, the user can set it to what they like
+ if self.iteration != epoch:
+ self.iteration = epoch
+ if hasattr(self.batch_sampler, "sampler") and hasattr(self.batch_sampler.sampler, "set_epoch"):
+ self.batch_sampler.sampler.set_epoch(epoch)
+ # We support if a custom `Dataset` implementation has `set_epoch`
+ # or in general HF datasets `Datasets`
+ elif hasattr(self.dataset, "set_epoch"):
+ self.dataset.set_epoch(epoch)
+
+ @property
+ def total_batch_size(self):
+ batch_sampler = self.sampler if isinstance(self.sampler, BatchSampler) else self.batch_sampler
+ return (
+ batch_sampler.batch_size
+ if getattr(batch_sampler, "split_batches", False)
+ else (batch_sampler.batch_size * getattr(batch_sampler, "num_processes", 1))
+ )
+
+ @property
+ def total_dataset_length(self):
+ if hasattr(self.dataset, "total_length"):
+ return self.dataset.total_length
+ else:
+ return len(self.dataset)
+
+
+if is_torch_xla_available():
+ import torch_xla.distributed.parallel_loader as xpl
+
+ class MpDeviceLoaderWrapper(xpl.MpDeviceLoader):
+ """
+ Wrapper for the xpl.MpDeviceLoader class that knows the total batch size.
+
+ XLA preloading threads will all call DataLoaderShard's __iter__(). Remove rng_types from DataLoaderShard to
+ prevent it from using the XLA device in the preloading threads, and synchronize the RNG once from the main
+ thread only.
+
+ **Available attributes:**
+
+ - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.
+ Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total
+ number of processes
+
+ - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
+ """
+
+ def __init__(self, dataloader: DataLoaderShard, device: torch.device):
+ super().__init__(dataloader, device)
+ self._rng_types = self._loader.rng_types
+ self._loader.rng_types = None
+
+ def __iter__(self):
+ if self._rng_types is not None:
+ synchronize_rng_states(self._rng_types, self._loader.synchronized_generator)
+
+ return super().__iter__()
+
+ @property
+ def total_batch_size(self):
+ return self._loader.total_batch_size
+
+ @property
+ def total_dataset_length(self):
+ return self._loader.total_dataset_length
+
+ @property
+ def batch_sampler(self):
+ return self._loader.batch_sampler
+
+
+class DataLoaderDispatcher(DataLoader, DataLoaderStateMixin):
+ """
+ Subclass of a PyTorch `DataLoader` that will iterate and preprocess on process 0 only, then dispatch on each
+ process their part of the batch.
+
+ Args:
+ split_batches (`bool`, *optional*, defaults to `False`):
+ Whether the resulting `DataLoader` should split the batches of the original data loader across devices or
+ yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing by
+ `num_processes` batches at each iteration). Another way to see this is that the observed batch size will be
+ the same as the initial `dataloader` if this option is set to `True`, the batch size of the initial
+ `dataloader` multiplied by `num_processes` otherwise. Setting this option to `True` requires that the batch
+ size of the `dataloader` is a round multiple of `batch_size`.
+ skip_batches (`int`, *optional*, defaults to 0):
+ The number of batches to skip at the beginning of an iteration.
+
+ **Available attributes:**
+
+ - **total_batch_size** (`int`) -- Total batch size of the dataloader across all processes.
+ Equal to the original batch size when `split_batches=True`; otherwise the original batch size * the total
+ number of processes
+
+ - **total_dataset_length** (`int`) -- Total length of the inner dataset across all processes.
+ """
+
+ def __init__(
+ self, dataset, split_batches: bool = False, skip_batches=0, _drop_last: bool = False, slice_fn=None, **kwargs
+ ):
+ shuffle = False
+ if is_torch_version(">=", "1.11.0"):
+ from torch.utils.data.datapipes.iter.combinatorics import ShufflerIterDataPipe
+
+ # We need to save the shuffling state of the DataPipe
+ if isinstance(dataset, ShufflerIterDataPipe):
+ shuffle = dataset._shuffle_enabled
+ super().__init__(dataset, **kwargs)
+ self.split_batches = split_batches
+ if shuffle:
+ torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle)
+
+ self.gradient_state = GradientState()
+ self.state = AcceleratorState()
+ self._drop_last = _drop_last
+ self.skip_batches = skip_batches
+
+ self.slice_fn = slice_tensors if slice_fn is None else slice_fn
+ self.iteration = 0
+
+ def _fetch_batches(self, iterator):
+ batches, batch = None, None
+ # On process 0, we gather the batch to dispatch.
+ if self.state.process_index == 0:
+ try:
+ if self.split_batches:
+ # One batch of the main iterator is dispatched and split.
+ batch = next(iterator)
+ else:
+ # num_processes batches of the main iterator are concatenated then dispatched and split.
+ # We add the batches one by one so we have the remainder available when drop_last=False.
+ batches = []
+ for _ in range(self.state.num_processes):
+ batches.append(next(iterator))
+ try:
+ batch = concatenate(batches, dim=0)
+ except RuntimeError as e:
+ raise RuntimeError(
+ "You can't use batches of different size with `dispatch_batches=True` or when using an `IterableDataset`."
+ "either pass `dispatch_batches=False` and have each process fetch its own batch "
+ " or pass `split_batches=True`. By doing so, the main process will fetch a full batch and "
+ "slice it into `num_processes` batches for each process."
+ ) from e
+ # In both cases, we need to get the structure of the batch that we will broadcast on other
+ # processes to initialize the tensors with the right shape.
+ # data_structure, stop_iteration
+ batch_info = [get_data_structure(batch), False]
+ except StopIteration:
+ batch_info = [None, True]
+ else:
+ batch_info = [None, self._stop_iteration]
+ # This is inplace, so after this instruction, every process has the same `batch_info` as process 0.
+ broadcast_object_list(batch_info)
+ self._stop_iteration = batch_info[1]
+ if self._stop_iteration:
+ # If drop_last is False and split_batches is False, we may have a remainder to take care of.
+ if not self.split_batches and not self._drop_last:
+ if self.state.process_index == 0 and len(batches) > 0:
+ batch = concatenate(batches, dim=0)
+ batch_info = [get_data_structure(batch), False]
+ else:
+ batch_info = [None, True]
+ broadcast_object_list(batch_info)
+ return batch, batch_info
+
+ def __iter__(self):
+ self.begin()
+ self.set_epoch(self.iteration)
+ main_iterator = None
+ if is_torch_version(">=", "2.0.1"):
+ # NOTE PyTorch DataLoader adds forward compatibilities for DataPipes, which broadcasts
+ # shared seed to all dist processes. Thus, we need to create iterator for all dist processes.
+ # But, we only iterate through the DataLoader on process 0.
+ main_iterator = super().__iter__()
+ elif self.state.process_index == 0:
+ main_iterator = super().__iter__()
+ stop_iteration = False
+ self._stop_iteration = False
+ first_batch = None
+ next_batch, next_batch_info = self._fetch_batches(main_iterator)
+ batch_index = 0
+ while not stop_iteration:
+ batch, batch_info = next_batch, next_batch_info
+
+ if self.state.process_index != 0:
+ # Initialize tensors on other processes than process 0.
+ batch = initialize_tensors(batch_info[0])
+ batch = send_to_device(batch, self.state.device)
+ # Broadcast the batch before splitting it.
+ batch = broadcast(batch, from_process=0)
+
+ if not self._drop_last and first_batch is None:
+ # We keep at least num processes elements of the first batch to be able to complete the last batch
+ first_batch = self.slice_fn(
+ batch,
+ slice(0, self.state.num_processes),
+ process_index=self.state.process_index,
+ num_processes=self.state.num_processes,
+ )
+
+ if batch is None:
+ raise ValueError(
+ f"Batch does not contain any data (`{batch}`). At the end of all iterable data available before expected stop iteration."
+ )
+
+ observed_batch_size = find_batch_size(batch)
+ batch_size = observed_batch_size // self.state.num_processes
+
+ stop_iteration = self._stop_iteration
+ if not stop_iteration:
+ # We may still be at the end of the dataloader without knowing it yet: if there is nothing left in
+ # the dataloader since the number of batches is a round multiple of the number of processes.
+ next_batch, next_batch_info = self._fetch_batches(main_iterator)
+ # next_batch_info[0] is None when there are no more batches, otherwise we still need to process them.
+ if self._stop_iteration and next_batch_info[0] is None:
+ stop_iteration = True
+
+ if not self._drop_last and stop_iteration and observed_batch_size % self.state.num_processes != 0:
+ # If the last batch is not complete, let's add the first batch to it.
+ batch = concatenate([batch, first_batch], dim=0)
+ # Batch size computation above is wrong, it's off by 1 so we fix it.
+ batch_size += 1
+
+ data_slice = slice(self.state.process_index * batch_size, (self.state.process_index + 1) * batch_size)
+ batch = self.slice_fn(
+ batch,
+ data_slice,
+ process_index=self.state.process_index,
+ num_processes=self.state.num_processes,
+ )
+
+ if stop_iteration:
+ self.end_of_dataloader = True
+ self.remainder = observed_batch_size
+ if batch_index >= self.skip_batches:
+ yield batch
+ batch_index += 1
+ self.iteration += 1
+ self.end()
+
+ def set_epoch(self, epoch: int):
+ # In case it is manually passed in, the user can set it to what they like
+ if self.iteration != epoch:
+ self.iteration = epoch
+ if hasattr(self.batch_sampler.sampler, "set_epoch"):
+ self.batch_sampler.sampler.set_epoch(epoch)
+ elif hasattr(self.dataset, "set_epoch"):
+ self.dataset.set_epoch(epoch)
+
+ def __len__(self):
+ whole_length = super().__len__()
+ if self.split_batches:
+ return whole_length
+ elif self._drop_last:
+ return whole_length // self.state.num_processes
+ else:
+ return math.ceil(whole_length / self.state.num_processes)
+
+ @property
+ def total_batch_size(self):
+ return (
+ self.dataset.batch_size if self.split_batches else (self.dataset.batch_size * self.dataset.num_processes)
+ )
+
+ @property
+ def total_dataset_length(self):
+ return len(self.dataset)
+
+
+def prepare_data_loader(
+ dataloader: DataLoader,
+ device: Optional[torch.device] = None,
+ num_processes: Optional[int] = None,
+ process_index: Optional[int] = None,
+ split_batches: bool = False,
+ put_on_device: bool = False,
+ rng_types: Optional[List[Union[str, RNGType]]] = None,
+ dispatch_batches: Optional[bool] = None,
+ even_batches: bool = True,
+ slice_fn_for_dispatch: Optional[Callable] = None,
+ use_seedable_sampler: bool = False,
+) -> DataLoader:
+ """
+ Wraps a PyTorch `DataLoader` to generate batches for one of the processes only.
+
+ Depending on the value of the `drop_last` attribute of the `dataloader` passed, it will either stop the iteration
+ at the first batch that would be too small / not present on all processes or loop with indices from the beginning.
+
+ Args:
+ dataloader (`torch.utils.data.dataloader.DataLoader`):
+ The data loader to split across several devices.
+ device (`torch.device`):
+ The target device for the returned `DataLoader`.
+ num_processes (`int`, *optional*):
+ The number of processes running concurrently. Will default to the value given by
+ [`~state.AcceleratorState`].
+ process_index (`int`, *optional*):
+ The index of the current process. Will default to the value given by [`~state.AcceleratorState`].
+ split_batches (`bool`, *optional*, defaults to `False`):
+ Whether the resulting `DataLoader` should split the batches of the original data loader across devices or
+ yield full batches (in which case it will yield batches starting at the `process_index`-th and advancing by
+ `num_processes` batches at each iteration).
+
+ Another way to see this is that the observed batch size will be the same as the initial `dataloader` if
+ this option is set to `True`, the batch size of the initial `dataloader` multiplied by `num_processes`
+ otherwise.
+
+ Setting this option to `True` requires that the batch size of the `dataloader` is a round multiple of
+ `batch_size`.
+ put_on_device (`bool`, *optional*, defaults to `False`):
+ Whether or not to put the batches on `device` (only works if the batches are nested list, tuples or
+ dictionaries of tensors).
+ rng_types (list of `str` or [`~utils.RNGType`]):
+ The list of random number generators to synchronize at the beginning of each iteration. Should be one or
+ several of:
+
+ - `"torch"`: the base torch random number generator
+ - `"cuda"`: the CUDA random number generator (GPU only)
+ - `"xla"`: the XLA random number generator (TPU only)
+ - `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your
+ dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.
+
+ dispatch_batches (`bool`, *optional*):
+ If set to `True`, the prepared dataloader is only iterated through on the main process and then the batches
+ are split and broadcast to each process. Will default to `True` when the underlying dataset is an
+ `IterableDataset`, `False` otherwise.
+ even_batches (`bool`, *optional*, defaults to `True`):
+ If set to `True`, in cases where the total batch size across all processes does not exactly divide the
+ dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among
+ all workers.
+ slice_fn_for_dispatch (`Callable`, *optional*):
+ If passed, this function will be used to slice tensors across `num_processes`. Will default to
+ [`~utils.slice_tensors`]. This argument is used only when `dispatch_batches` is set to `True` and will be
+ ignored otherwise.
+ use_seedable_sampler (`bool`, *optional*, defaults to `False`):
+ Whether to use the [`~data_loader.SeedableRandomSampler`] instead of a `RandomSampler` for better
+ reproducibility. Comes at a cost of potentially different performance due to different shuffling
+ algorithms, but ensures results will be the *exact* same. Should be paired with `set_seed()` at every
+ `self.set_epoch`
+
+ Returns:
+ `torch.utils.data.dataloader.DataLoader`: A new data loader that will yield the portion of the batches
+
+
+
+ `BatchSampler`s with varying batch sizes are not enabled by default. To enable this behaviour, set `even_batches`
+ equal to `False`
+
+
+ """
+ if dispatch_batches is None:
+ if not put_on_device:
+ dispatch_batches = False
+ else:
+ dispatch_batches = isinstance(dataloader.dataset, IterableDataset)
+
+ if dispatch_batches and not put_on_device:
+ raise ValueError("Using `dispatch_batches=True` requires `put_on_device=True`.")
+ # Grab defaults from AcceleratorState
+ state = AcceleratorState()
+ if num_processes is None:
+ num_processes = state.num_processes
+ if process_index is None:
+ process_index = state.process_index
+
+ # Sanity check
+ if split_batches:
+ if dataloader.batch_size is not None:
+ batch_size_for_check = dataloader.batch_size
+ else:
+ # For custom batch_sampler
+ if hasattr(dataloader.batch_sampler, "batch_size"):
+ batch_size_for_check = dataloader.batch_sampler.batch_size
+ else:
+ raise ValueError(
+ "In order to use `split_batches==True` you must have a `batch_size` attribute either in the passed "
+ "`dataloader` or `dataloader.batch_sampler` objects, and it has to return a natural number. "
+ "Your `dataloader.batch_size` is None and `dataloader.batch_sampler` "
+ f"(`{type(dataloader.batch_sampler)}`) does not have the `batch_size` attribute set."
+ )
+
+ if batch_size_for_check > 1 and batch_size_for_check % num_processes != 0:
+ raise ValueError(
+ f"To use a `DataLoader` in `split_batches` mode, the batch size ({dataloader.batch_size}) "
+ f"needs to be a round multiple of the number of processes ({num_processes})."
+ )
+
+ new_dataset = dataloader.dataset
+ # Iterable dataset doesn't like batch_sampler, but data_loader creates a default one for it
+ new_batch_sampler = dataloader.batch_sampler if not isinstance(new_dataset, IterableDataset) else None
+ sampler_is_batch_sampler = False
+ synchronized_generator = None
+ sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)
+ if sampler_is_batch_sampler:
+ sampler = getattr(dataloader.sampler, "sampler", None)
+ else:
+ sampler = getattr(dataloader.batch_sampler, "sampler", None)
+ if isinstance(sampler, RandomSampler) and use_seedable_sampler:
+ # When iterating through the dataloader during distributed processes
+ # we want to ensure that on each process we are iterating through the same
+ # samples in the same order if a seed is set. This requires a tweak
+ # to the `torch.utils.data.RandomSampler` class (if used).
+ sampler = SeedableRandomSampler(
+ data_source=sampler.data_source,
+ replacement=sampler.replacement,
+ num_samples=sampler._num_samples,
+ generator=getattr(sampler, "generator", torch.Generator()),
+ )
+
+ if isinstance(dataloader.sampler, RandomSampler) and state.distributed_type == DistributedType.XLA:
+ # isinstance(dataloader.sampler, RandomSampler) indicates the original dataloader has `shuffle` enabled.
+ generator = torch.Generator().manual_seed(42)
+ dataloader.generator = generator
+ dataloader.sampler.generator = generator
+ # No change if no multiprocess
+ if (num_processes != 1 or state.distributed_type == DistributedType.MEGATRON_LM) and not dispatch_batches:
+ if isinstance(new_dataset, IterableDataset):
+ if getattr(dataloader.dataset, "generator", None) is not None:
+ synchronized_generator = dataloader.dataset.generator
+ new_dataset = IterableDatasetShard(
+ new_dataset,
+ batch_size=dataloader.batch_size,
+ drop_last=dataloader.drop_last,
+ num_processes=num_processes,
+ process_index=process_index,
+ split_batches=split_batches,
+ )
+ else:
+ batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler
+ new_batch_sampler = BatchSamplerShard(
+ batch_sampler,
+ num_processes=num_processes,
+ process_index=process_index,
+ split_batches=split_batches,
+ even_batches=even_batches,
+ )
+
+ # We ignore all of those since they are all dealt with by our new_batch_sampler
+ ignore_kwargs = [
+ "batch_size",
+ "shuffle",
+ "sampler",
+ "batch_sampler",
+ "drop_last",
+ ]
+
+ if rng_types is not None and synchronized_generator is None and "generator" in rng_types:
+ rng_types.remove("generator")
+
+ kwargs = {
+ k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k])
+ for k in _PYTORCH_DATALOADER_KWARGS
+ if k not in ignore_kwargs
+ }
+
+ # Need to provide batch_size as batch_sampler is None for Iterable dataset
+ if new_batch_sampler is None:
+ kwargs["drop_last"] = dataloader.drop_last
+ kwargs["batch_size"] = (
+ dataloader.batch_size // num_processes if split_batches and not dispatch_batches else dataloader.batch_size
+ )
+ if dispatch_batches:
+ kwargs.pop("generator")
+ dataloader = DataLoaderDispatcher(
+ new_dataset,
+ split_batches=split_batches,
+ batch_sampler=new_batch_sampler,
+ _drop_last=dataloader.drop_last,
+ slice_fn=slice_fn_for_dispatch,
+ **kwargs,
+ )
+ elif sampler_is_batch_sampler:
+ dataloader = DataLoaderShard(
+ new_dataset,
+ device=device if put_on_device and state.distributed_type != DistributedType.XLA else None,
+ sampler=new_batch_sampler,
+ batch_size=dataloader.batch_size,
+ rng_types=rng_types,
+ _drop_last=dataloader.drop_last,
+ synchronized_generator=synchronized_generator,
+ **kwargs,
+ )
+ else:
+ dataloader = DataLoaderShard(
+ new_dataset,
+ device=device if put_on_device and state.distributed_type != DistributedType.XLA else None,
+ batch_sampler=new_batch_sampler,
+ rng_types=rng_types,
+ synchronized_generator=synchronized_generator,
+ _drop_last=dataloader.drop_last,
+ **kwargs,
+ )
+
+ if isinstance(sampler, SeedableRandomSampler) and use_seedable_sampler:
+ if sampler_is_batch_sampler:
+ dataloader.sampler.sampler = sampler
+ else:
+ dataloader.batch_sampler.sampler = sampler
+ if hasattr(dataloader.batch_sampler, "batch_sampler"):
+ dataloader.batch_sampler.batch_sampler.sampler = sampler
+ if state.distributed_type == DistributedType.XLA:
+ return MpDeviceLoaderWrapper(dataloader, device)
+ return dataloader
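`prepare_data_loader` is normally invoked for you by `Accelerator.prepare`, which fills in the process count, process index and device from the current state. A minimal sketch of that path, which also runs on a single CPU process:

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator()
dataset = TensorDataset(torch.arange(16, dtype=torch.float32))
loader = accelerator.prepare(DataLoader(dataset, batch_size=4, shuffle=True))
for (batch,) in loader:
    pass  # batches are already placed on `accelerator.device`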
+
+
+class SkipBatchSampler(BatchSampler):
+ """
+ A `torch.utils.data.BatchSampler` that skips the first `n` batches of another `torch.utils.data.BatchSampler`.
+ """
+
+ def __init__(self, batch_sampler, skip_batches=0):
+ self.batch_sampler = batch_sampler
+ self.skip_batches = skip_batches
+
+ def __iter__(self):
+ for index, samples in enumerate(self.batch_sampler):
+ if index >= self.skip_batches:
+ yield samples
+
+ @property
+ def total_length(self):
+ return len(self.batch_sampler)
+
+ def __len__(self):
+ return len(self.batch_sampler) - self.skip_batches
+
+
+class SkipDataLoader(DataLoader):
+ """
+ Subclass of a PyTorch `DataLoader` that will skip the first batches.
+
+ Args:
+ dataset (`torch.utils.data.dataset.Dataset`):
+ The dataset to use to build this dataloader.
+ skip_batches (`int`, *optional*, defaults to 0):
+ The number of batches to skip at the beginning.
+ kwargs:
+ All other keyword arguments to pass to the regular `DataLoader` initialization.
+ """
+
+ def __init__(self, dataset, skip_batches=0, **kwargs):
+ super().__init__(dataset, **kwargs)
+ self.skip_batches = skip_batches
+
+ def __iter__(self):
+ for index, batch in enumerate(super().__iter__()):
+ if index >= self.skip_batches:
+ yield batch
+
+
+def skip_first_batches(dataloader, num_batches=0):
+ """
+ Creates a `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`.
+ """
+ dataset = dataloader.dataset
+ sampler_is_batch_sampler = False
+ if isinstance(dataset, IterableDataset):
+ new_batch_sampler = None
+ else:
+ sampler_is_batch_sampler = isinstance(dataloader.sampler, BatchSampler)
+ batch_sampler = dataloader.sampler if sampler_is_batch_sampler else dataloader.batch_sampler
+ new_batch_sampler = SkipBatchSampler(batch_sampler, skip_batches=num_batches)
+
+ # We ignore all of those since they are all dealt with by our new_batch_sampler
+ ignore_kwargs = [
+ "batch_size",
+ "shuffle",
+ "sampler",
+ "batch_sampler",
+ "drop_last",
+ ]
+
+ kwargs = {
+ k: getattr(dataloader, k, _PYTORCH_DATALOADER_KWARGS[k])
+ for k in _PYTORCH_DATALOADER_KWARGS
+ if k not in ignore_kwargs
+ }
+
+ # Need to provide batch_size as batch_sampler is None for Iterable dataset
+ if new_batch_sampler is None:
+ kwargs["drop_last"] = dataloader.drop_last
+ kwargs["batch_size"] = dataloader.batch_size
+
+ if isinstance(dataloader, DataLoaderDispatcher):
+ if new_batch_sampler is None:
+ # Need to manually skip batches in the dataloader
+ kwargs["skip_batches"] = num_batches
+ dataloader = DataLoaderDispatcher(
+ dataset,
+ split_batches=dataloader.split_batches,
+ batch_sampler=new_batch_sampler,
+ _drop_last=dataloader._drop_last,
+ **kwargs,
+ )
+ elif isinstance(dataloader, DataLoaderShard):
+ if new_batch_sampler is None:
+ # Need to manually skip batches in the dataloader
+ kwargs["skip_batches"] = num_batches
+ elif sampler_is_batch_sampler:
+ kwargs["sampler"] = new_batch_sampler
+ kwargs["batch_size"] = dataloader.batch_size
+ else:
+ kwargs["batch_sampler"] = new_batch_sampler
+ dataloader = DataLoaderShard(
+ dataset,
+ device=dataloader.device,
+ rng_types=dataloader.rng_types,
+ synchronized_generator=dataloader.synchronized_generator,
+ **kwargs,
+ )
+ else:
+ if new_batch_sampler is None:
+ # Need to manually skip batches in the dataloader
+ dataloader = SkipDataLoader(dataset, skip_batches=num_batches, **kwargs)
+ else:
+ dataloader = DataLoader(dataset, batch_sampler=new_batch_sampler, **kwargs)
+
+ return dataloader
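`skip_first_batches` is the resume-from-checkpoint helper: it rebuilds the dataloader around a `SkipBatchSampler` (or a `SkipDataLoader` when there is no batch sampler) so the skipped batches are never yielded. A minimal sketch on a plain `DataLoader`:

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate.data_loader import skip_first_batches

dataset = TensorDataset(torch.arange(12, dtype=torch.float32))
loader = DataLoader(dataset, batch_size=4)
resumed = skip_first_batches(loader, num_batches=1)
print(len(resumed), [batch[0].tolist() for batch in resumed])
# 2 [[4.0, 5.0, 6.0, 7.0], [8.0, 9.0, 10.0, 11.0]]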
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/hooks.py b/env-llmeval/lib/python3.10/site-packages/accelerate/hooks.py
new file mode 100644
index 0000000000000000000000000000000000000000..e9a4b384f3cac39e7bedabb1f5e7c0320aae6a7f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/hooks.py
@@ -0,0 +1,709 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import functools
+from typing import Dict, List, Mapping, Optional, Union
+
+import torch
+import torch.nn as nn
+
+from .state import PartialState
+from .utils import (
+ PrefixedDataset,
+ find_device,
+ named_module_tensors,
+ send_to_device,
+ set_module_tensor_to_device,
+)
+from .utils.modeling import get_non_persistent_buffers
+from .utils.other import recursive_getattr
+
+
+class ModelHook:
+ """
+ A hook that contains callbacks to be executed just before and after the forward method of a model. The difference
+ with PyTorch's existing hooks is that they also get passed the kwargs.
+
+ Class attribute:
+ - **no_grad** (`bool`, *optional*, defaults to `False`) -- Whether or not to execute the actual forward pass under
+ the `torch.no_grad()` context manager.
+ """
+
+ no_grad = False
+
+ def init_hook(self, module):
+ """
+ To be executed when the hook is attached to the module.
+
+ Args:
+ module (`torch.nn.Module`): The module attached to this hook.
+ """
+ return module
+
+ def pre_forward(self, module, *args, **kwargs):
+ """
+ To be executed just before the forward method of the model.
+
+ Args:
+ module (`torch.nn.Module`): The module whose forward pass will be executed just after this event.
+ args (`Tuple[Any]`): The positional arguments passed to the module.
+ kwargs (`Dict[Str, Any]`): The keyword arguments passed to the module.
+
+ Returns:
+ `Tuple[Tuple[Any], Dict[Str, Any]]`: A tuple with the treated `args` and `kwargs`.
+ """
+ return args, kwargs
+
+ def post_forward(self, module, output):
+ """
+ To be executed just after the forward method of the model.
+
+ Args:
+ module (`torch.nn.Module`): The module whose forward pass has been executed just before this event.
+ output (`Any`): The output of the module.
+
+ Returns:
+ `Any`: The processed `output`.
+ """
+ return output
+
+ def detach_hook(self, module):
+ """
+ To be executed when the hook is detached from a module.
+
+ Args:
+ module (`torch.nn.Module`): The module detached from this hook.
+ """
+ return module
+
+
+class SequentialHook(ModelHook):
+ """
+ A hook that can contain several hooks and iterates through them at each event.
+ """
+
+ def __init__(self, *hooks):
+ self.hooks = hooks
+
+ def init_hook(self, module):
+ for hook in self.hooks:
+ module = hook.init_hook(module)
+ return module
+
+ def pre_forward(self, module, *args, **kwargs):
+ for hook in self.hooks:
+ args, kwargs = hook.pre_forward(module, *args, **kwargs)
+ return args, kwargs
+
+ def post_forward(self, module, output):
+ for hook in self.hooks:
+ output = hook.post_forward(module, output)
+ return output
+
+ def detach_hook(self, module):
+ for hook in self.hooks:
+ module = hook.detach_hook(module)
+ return module
+
+
+def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False):
+ """
+ Adds a hook to a given module. This will rewrite the `forward` method of the module to include the hook, to remove
+ this behavior and restore the original `forward` method, use `remove_hook_from_module`.
+
+ If the module already contains a hook, this will replace it with the new hook passed by default. To chain two hooks
+ together, pass `append=True`, so it chains the current and new hook into an instance of the `SequentialHook` class.
+
+ Args:
+ module (`torch.nn.Module`):
+ The module to attach a hook to.
+ hook (`ModelHook`):
+ The hook to attach.
+ append (`bool`, *optional*, defaults to `False`):
+ Whether the hook should be chained with an existing one (if module already contains a hook) or not.
+
+ Returns:
+ `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can
+ be discarded).
+ """
+
+ if append and (getattr(module, "_hf_hook", None) is not None):
+ old_hook = module._hf_hook
+ remove_hook_from_module(module)
+ hook = SequentialHook(old_hook, hook)
+
+ if hasattr(module, "_hf_hook") and hasattr(module, "_old_forward"):
+ # If we already put some hook on this module, we replace it with the new one.
+ old_forward = module._old_forward
+ else:
+ old_forward = module.forward
+ module._old_forward = old_forward
+
+ module = hook.init_hook(module)
+ module._hf_hook = hook
+
+ def new_forward(module, *args, **kwargs):
+ args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs)
+ if module._hf_hook.no_grad:
+ with torch.no_grad():
+ output = module._old_forward(*args, **kwargs)
+ else:
+ output = module._old_forward(*args, **kwargs)
+ return module._hf_hook.post_forward(module, output)
+
+ # Overriding a GraphModuleImpl forward freezes the forward call and later modifications on the graph will fail.
+ # Reference: https://pytorch.slack.com/archives/C3PDTEV8E/p1705929610405409
+ if "GraphModuleImpl" in str(type(module)):
+ module.__class__.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward)
+ else:
+ module.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward)
+
+ return module
+
+
+def remove_hook_from_module(module: nn.Module, recurse=False):
+ """
+ Removes any hook attached to a module via `add_hook_to_module`.
+
+ Args:
+ module (`torch.nn.Module`): The module to remove the hook from.
+ recurse (`bool`, *optional*): Whether to remove the hooks recursively.
+
+ Returns:
+ `torch.nn.Module`: The same module, with the hook detached (the module is modified in place, so the result can
+ be discarded).
+ """
+
+ if hasattr(module, "_hf_hook"):
+ module._hf_hook.detach_hook(module)
+ delattr(module, "_hf_hook")
+
+ if hasattr(module, "_old_forward"):
+ # Overriding a GraphModuleImpl forward freezes the forward call and later modifications on the graph will fail.
+ # Reference: https://pytorch.slack.com/archives/C3PDTEV8E/p1705929610405409
+ if "GraphModuleImpl" in str(type(module)):
+ module.__class__.forward = module._old_forward
+ else:
+ module.forward = module._old_forward
+ delattr(module, "_old_forward")
+
+ if recurse:
+ for child in module.children():
+ remove_hook_from_module(child, recurse)
+
+ return module
+
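+# Usage sketch (illustrative only): attaching and removing a custom hook. `LoggingHook` is a hypothetical
+# example class; `add_hook_to_module` and `remove_hook_from_module` are the helpers defined above.
+#
+# import torch
+# import torch.nn as nn
+#
+# class LoggingHook(ModelHook):
+#     def pre_forward(self, module, *args, **kwargs):
+#         print(f"forward of {module.__class__.__name__} called")
+#         return args, kwargs
+#
+# layer = nn.Linear(4, 4)
+# add_hook_to_module(layer, LoggingHook())  # wraps `layer.forward`
+# layer(torch.randn(2, 4))                  # logs, then runs the original forward
+# remove_hook_from_module(layer)            # restores the original `forward`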
+
+class AlignDevicesHook(ModelHook):
+ """
+ A generic `ModelHook` that ensures inputs and model weights are on the same device for the forward pass of the
+ associated module, potentially offloading the weights after the forward pass.
+
+ Args:
+ execution_device (`torch.device`, *optional*):
+ The device on which inputs and model weights should be placed before the forward pass.
+ offload (`bool`, *optional*, defaults to `False`):
+ Whether or not the weights should be offloaded after the forward pass.
+ io_same_device (`bool`, *optional*, defaults to `False`):
+ Whether or not the output should be placed on the same device as the input was.
+ weights_map (`Mapping[str, torch.Tensor]`, *optional*):
+ When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.
+ offload_buffers (`bool`, *optional*, defaults to `False`):
+ Whether or not to include the associated module's buffers when offloading.
+ place_submodules (`bool`, *optional*, defaults to `False`):
+ Whether to place the submodules on `execution_device` during the `init_hook` event.
+ """
+
+ def __init__(
+ self,
+ execution_device: Optional[Union[int, str, torch.device]] = None,
+ offload: bool = False,
+ io_same_device: bool = False,
+ weights_map: Optional[Mapping] = None,
+ offload_buffers: bool = False,
+ place_submodules: bool = False,
+ skip_keys: Optional[Union[str, List[str]]] = None,
+ tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
+ ):
+ self.execution_device = execution_device
+ self.offload = offload
+ self.io_same_device = io_same_device
+ self.weights_map = weights_map
+ self.offload_buffers = offload_buffers
+ self.place_submodules = place_submodules
+ self.skip_keys = skip_keys
+
+ # Will contain the input device when `io_same_device=True`.
+ self.input_device = None
+ self.param_original_devices = {}
+ self.buffer_original_devices = {}
+ self.tied_params_names = set()
+
+ # The hook pre_forward/post_forward need to have knowledge of this dictionary, as with offloading we want to avoid duplicating memory
+ # for tied weights already loaded on the target execution device.
+ self.tied_params_map = tied_params_map
+
+ def __repr__(self):
+ return (
+ f"AlignDevicesHook(execution_device={self.execution_device}, offload={self.offload}, "
+ f"io_same_device={self.io_same_device}, offload_buffers={self.offload_buffers}, "
+ f"place_submodules={self.place_submodules}, skip_keys={repr(self.skip_keys)})"
+ )
+
+ def init_hook(self, module):
+ # In case the AlignDevicesHook is on meta device, ignore tied weights as data_ptr() is then always zero.
+ if self.execution_device == "meta" or self.execution_device == torch.device("meta"):
+ self.tied_params_map = None
+
+ if not self.offload and self.execution_device is not None:
+ for name, _ in named_module_tensors(module, recurse=self.place_submodules):
+ set_module_tensor_to_device(module, name, self.execution_device, tied_params_map=self.tied_params_map)
+ elif self.offload:
+ self.original_devices = {
+ name: param.device for name, param in named_module_tensors(module, recurse=self.place_submodules)
+ }
+ if self.weights_map is None:
+ self.weights_map = {
+ name: param.to("cpu")
+ for name, param in named_module_tensors(
+ module, include_buffers=self.offload_buffers, recurse=self.place_submodules
+ )
+ }
+ for name, _ in named_module_tensors(
+ module, include_buffers=self.offload_buffers, recurse=self.place_submodules, remove_non_persistent=True
+ ):
+ # When using disk offloading, we cannot rely on `weights_map[name].data_ptr()` as the reference pointer,
+ # as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer.
+ # As we have no reliable way to track the shared data pointer of tied weights in this case, we use the
+ # `tied_params_names` set to add pointers to `tied_params_map` on the fly in the pre_forward call.
+ if (
+ self.tied_params_map is not None
+ and recursive_getattr(module, name).data_ptr() in self.tied_params_map
+ ):
+ self.tied_params_names.add(name)
+
+ set_module_tensor_to_device(module, name, "meta")
+
+ if not self.offload_buffers and self.execution_device is not None:
+ for name, _ in module.named_buffers(recurse=self.place_submodules):
+ set_module_tensor_to_device(
+ module, name, self.execution_device, tied_params_map=self.tied_params_map
+ )
+ elif self.offload_buffers and self.execution_device is not None:
+ for name in get_non_persistent_buffers(module, recurse=self.place_submodules):
+ set_module_tensor_to_device(
+ module, name, self.execution_device, tied_params_map=self.tied_params_map
+ )
+
+ return module
+
+ def pre_forward(self, module, *args, **kwargs):
+ if self.io_same_device:
+ self.input_device = find_device([args, kwargs])
+ if self.offload:
+ self.tied_pointers_to_remove = set()
+
+ for name, _ in named_module_tensors(
+ module,
+ include_buffers=self.offload_buffers,
+ recurse=self.place_submodules,
+ remove_non_persistent=True,
+ ):
+ fp16_statistics = None
+ value = self.weights_map[name]
+ if "weight" in name and name.replace("weight", "SCB") in self.weights_map.keys():
+ if value.dtype == torch.int8:
+ fp16_statistics = self.weights_map[name.replace("weight", "SCB")]
+
+ # In case we are using offloading with tied weights, we need to keep track of the offloaded weights
+ # that are loaded on device at this point, as we will need to remove them as well from the dictionary
+ # self.tied_params_map in order to allow to free memory.
+ if name in self.tied_params_names and value.data_ptr() not in self.tied_params_map:
+ self.tied_params_map[value.data_ptr()] = {}
+
+ if (
+ value is not None
+ and self.tied_params_map is not None
+ and value.data_ptr() in self.tied_params_map
+ and self.execution_device not in self.tied_params_map[value.data_ptr()]
+ ):
+ self.tied_pointers_to_remove.add((value.data_ptr(), self.execution_device))
+
+ set_module_tensor_to_device(
+ module,
+ name,
+ self.execution_device,
+ value=value,
+ fp16_statistics=fp16_statistics,
+ tied_params_map=self.tied_params_map,
+ )
+
+ return send_to_device(args, self.execution_device), send_to_device(
+ kwargs, self.execution_device, skip_keys=self.skip_keys
+ )
+
+ def post_forward(self, module, output):
+ if self.offload:
+ for name, _ in named_module_tensors(
+ module,
+ include_buffers=self.offload_buffers,
+ recurse=self.place_submodules,
+ remove_non_persistent=True,
+ ):
+ set_module_tensor_to_device(module, name, "meta")
+ if type(module).__name__ == "Linear8bitLt":
+ module.state.SCB = None
+ module.state.CxB = None
+
+ # We may have loaded tied weights into self.tied_params_map (avoiding loading them several times, e.g. in submodules):
+ # remove them from this dictionary to allow the garbage collector to do its job.
+ for value_pointer, device in self.tied_pointers_to_remove:
+ del self.tied_params_map[value_pointer][device]
+ self.tied_pointers_to_remove = set()
+
+ if self.io_same_device and self.input_device is not None:
+ output = send_to_device(output, self.input_device, skip_keys=self.skip_keys)
+
+ return output
+
+ def detach_hook(self, module):
+ if self.offload:
+ for name, device in self.original_devices.items():
+ if device != torch.device("meta"):
+ set_module_tensor_to_device(module, name, device, value=self.weights_map.get(name, None))
+ return module
+
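+# Usage sketch (illustrative only): manually attaching an `AlignDevicesHook` so inputs follow the module
+# onto its execution device and outputs are moved back to the input's device. The device choice below is
+# an assumption for the example.
+#
+# import torch
+# import torch.nn as nn
+#
+# device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
+# layer = nn.Linear(8, 8)
+# add_hook_to_module(layer, AlignDevicesHook(execution_device=device, io_same_device=True))
+# out = layer(torch.randn(2, 8))  # the CPU input is moved to `device`, the output is moved back to CPU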
+
+def attach_execution_device_hook(
+ module: torch.nn.Module,
+ execution_device: Union[int, str, torch.device],
+ skip_keys: Optional[Union[str, List[str]]] = None,
+ preload_module_classes: Optional[List[str]] = None,
+ tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
+):
+ """
+ Recursively attaches `AlignDevicesHook` to all submodules of a given model to make sure they have the right
+ execution device.
+
+ Args:
+ module (`torch.nn.Module`):
+ The module where we want to attach the hooks.
+ execution_device (`int`, `str` or `torch.device`):
+ The device on which inputs and model weights should be placed before the forward pass.
+ skip_keys (`str` or `List[str]`, *optional*):
+ A list of keys to ignore when moving inputs or outputs between devices.
+ preload_module_classes (`List[str]`, *optional*):
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
+ of the forward. This should only be used for classes that have submodules which are registered but not
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
+ tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`):
+ A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution
+ device, this parameter is useful to reuse the first available pointer of a shared weight for all others,
+ instead of duplicating memory.
+ """
+ if not hasattr(module, "_hf_hook") and len(module.state_dict()) > 0:
+ add_hook_to_module(
+ module,
+ AlignDevicesHook(execution_device, skip_keys=skip_keys, tied_params_map=tied_params_map),
+ )
+
+ # Break the recursion if we get to a preload module.
+ if preload_module_classes is not None and module.__class__.__name__ in preload_module_classes:
+ return
+
+ for child in module.children():
+ attach_execution_device_hook(child, execution_device, tied_params_map=tied_params_map)
+
+
+def attach_align_device_hook(
+ module: torch.nn.Module,
+ execution_device: Optional[torch.device] = None,
+ offload: bool = False,
+ weights_map: Optional[Mapping] = None,
+ offload_buffers: bool = False,
+ module_name: str = "",
+ skip_keys: Optional[Union[str, List[str]]] = None,
+ preload_module_classes: Optional[List[str]] = None,
+ tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
+):
+ """
+ Recursively attaches `AlignDevicesHook` to all submodules of a given model that have direct parameters and/or
+ buffers.
+
+ Args:
+ module (`torch.nn.Module`):
+ The module where we want to attach the hooks.
+ execution_device (`torch.device`, *optional*):
+ The device on which inputs and model weights should be placed before the forward pass.
+ offload (`bool`, *optional*, defaults to `False`):
+ Whether or not the weights should be offloaded after the forward pass.
+ weights_map (`Mapping[str, torch.Tensor]`, *optional*):
+ When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.
+ offload_buffers (`bool`, *optional*, defaults to `False`):
+ Whether or not to include the associated module's buffers when offloading.
+ module_name (`str`, *optional*, defaults to `""`):
+ The name of the module.
+ skip_keys (`str` or `List[str]`, *optional*):
+ A list of keys to ignore when moving inputs or outputs between devices.
+ preload_module_classes (`List[str]`, *optional*):
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
+ of the forward. This should only be used for classes that have submodules which are registered but not
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
+ tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`):
+ A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution
+ device, this parameter is useful to reuse the first available pointer of a shared weight for all others,
+ instead of duplicating memory.
+ """
+ # Attach the hook on this module if it has any direct tensor.
+ directs = named_module_tensors(module)
+ full_offload = (
+ offload and preload_module_classes is not None and module.__class__.__name__ in preload_module_classes
+ )
+
+ if len(list(directs)) > 0 or full_offload:
+ if weights_map is not None:
+ prefix = f"{module_name}." if len(module_name) > 0 else ""
+ prefixed_weights_map = PrefixedDataset(weights_map, prefix)
+ else:
+ prefixed_weights_map = None
+ hook = AlignDevicesHook(
+ execution_device=execution_device,
+ offload=offload,
+ weights_map=prefixed_weights_map,
+ offload_buffers=offload_buffers,
+ place_submodules=full_offload,
+ skip_keys=skip_keys,
+ tied_params_map=tied_params_map,
+ )
+ add_hook_to_module(module, hook, append=True)
+
+ # We stop the recursion in case we hit the full offload.
+ if full_offload:
+ return
+
+ # Recurse on all children of the module.
+ for child_name, child in module.named_children():
+ child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name
+ attach_align_device_hook(
+ child,
+ execution_device=execution_device,
+ offload=offload,
+ weights_map=weights_map,
+ offload_buffers=offload_buffers,
+ module_name=child_name,
+ preload_module_classes=preload_module_classes,
+ skip_keys=skip_keys,
+ tied_params_map=tied_params_map,
+ )
+
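+# Usage sketch (illustrative only): CPU-offloading a small model with `attach_align_device_hook`. With
+# `offload=True` and no `weights_map`, each hooked submodule keeps a CPU copy of its weights, loads them
+# onto `execution_device` in `pre_forward`, and puts them back on the meta device after the forward.
+# The device choice is an assumption for the example.
+#
+# import torch
+# import torch.nn as nn
+#
+# model = nn.Sequential(nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 4))
+# exec_device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
+# attach_align_device_hook(model, execution_device=exec_device, offload=True, offload_buffers=True)
+# out = model(torch.randn(1, 16))  # weights are materialized on `exec_device` only during the forward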
+
+def remove_hook_from_submodules(module: nn.Module):
+ """
+ Recursively removes all hooks attached on the submodules of a given model.
+
+ Args:
+ module (`torch.nn.Module`): The module on which to remove all hooks.
+ """
+ remove_hook_from_module(module)
+ for child in module.children():
+ remove_hook_from_submodules(child)
+
+
+def attach_align_device_hook_on_blocks(
+ module: nn.Module,
+ execution_device: Optional[Union[torch.device, Dict[str, torch.device]]] = None,
+ offload: Union[bool, Dict[str, bool]] = False,
+ weights_map: Mapping = None,
+ offload_buffers: bool = False,
+ module_name: str = "",
+ skip_keys: Optional[Union[str, List[str]]] = None,
+ preload_module_classes: Optional[List[str]] = None,
+ tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None,
+):
+ """
+ Attaches `AlignDevicesHook` to all blocks of a given model as needed.
+
+ Args:
+ module (`torch.nn.Module`):
+ The module where we want to attach the hooks.
+ execution_device (`torch.device` or `Dict[str, torch.device]`, *optional*):
+ The device on which inputs and model weights should be placed before the forward pass. It can be one device
+ for the whole module, or a dictionary mapping module name to device.
+ offload (`bool`, *optional*, defaults to `False`):
+ Whether or not the weights should be offloaded after the forward pass. It can be one boolean for the whole
+ module, or a dictionary mapping module name to boolean.
+ weights_map (`Mapping[str, torch.Tensor]`, *optional*):
+ When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values.
+ offload_buffers (`bool`, *optional*, defaults to `False`):
+ Whether or not to include the associated module's buffers when offloading.
+ module_name (`str`, *optional*, defaults to `""`):
+ The name of the module.
+ skip_keys (`str` or `List[str]`, *optional*):
+ A list of keys to ignore when moving inputs or outputs between devices.
+ preload_module_classes (`List[str]`, *optional*):
+ A list of classes whose instances should load all their weights (even in the submodules) at the beginning
+ of the forward. This should only be used for classes that have submodules which are registered but not
+ called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
+ `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
+ tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`):
+ A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution
+ device, this parameter is useful to reuse the first available pointer of a shared weight for all others,
+ instead of duplicating memory.
+ """
+ # If one device and one offload, we've got one hook.
+ if not isinstance(execution_device, Mapping) and not isinstance(offload, dict):
+ if not offload:
+ hook = AlignDevicesHook(
+ execution_device=execution_device,
+ io_same_device=True,
+ skip_keys=skip_keys,
+ place_submodules=True,
+ tied_params_map=tied_params_map,
+ )
+ add_hook_to_module(module, hook)
+ else:
+ attach_align_device_hook(
+ module,
+ execution_device=execution_device,
+ offload=True,
+ weights_map=weights_map,
+ offload_buffers=offload_buffers,
+ module_name=module_name,
+ skip_keys=skip_keys,
+ tied_params_map=tied_params_map,
+ )
+ return
+
+ if not isinstance(execution_device, Mapping):
+ execution_device = {key: execution_device for key in offload.keys()}
+ if not isinstance(offload, Mapping):
+ offload = {key: offload for key in execution_device.keys()}
+
+ if module_name in execution_device and module_name in offload and not offload[module_name]:
+ hook = AlignDevicesHook(
+ execution_device=execution_device[module_name],
+ offload_buffers=offload_buffers,
+ io_same_device=(module_name == ""),
+ place_submodules=True,
+ skip_keys=skip_keys,
+ tied_params_map=tied_params_map,
+ )
+ add_hook_to_module(module, hook)
+ attach_execution_device_hook(module, execution_device[module_name], tied_params_map=tied_params_map)
+ elif module_name in execution_device and module_name in offload:
+ attach_align_device_hook(
+ module,
+ execution_device=execution_device[module_name],
+ offload=True,
+ weights_map=weights_map,
+ offload_buffers=offload_buffers,
+ module_name=module_name,
+ skip_keys=skip_keys,
+ preload_module_classes=preload_module_classes,
+ tied_params_map=tied_params_map,
+ )
+ if not hasattr(module, "_hf_hook"):
+ hook = AlignDevicesHook(
+ execution_device=execution_device[module_name],
+ io_same_device=(module_name == ""),
+ skip_keys=skip_keys,
+ tied_params_map=tied_params_map,
+ )
+ add_hook_to_module(module, hook)
+ attach_execution_device_hook(
+ module,
+ execution_device[module_name],
+ preload_module_classes=preload_module_classes,
+ skip_keys=skip_keys,
+ tied_params_map=tied_params_map,
+ )
+ elif module_name == "":
+ hook = AlignDevicesHook(
+ execution_device=execution_device.get(""),
+ io_same_device=True,
+ skip_keys=skip_keys,
+ tied_params_map=tied_params_map,
+ )
+ add_hook_to_module(module, hook)
+
+ for child_name, child in module.named_children():
+ child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name
+ attach_align_device_hook_on_blocks(
+ child,
+ execution_device=execution_device,
+ offload=offload,
+ weights_map=weights_map,
+ offload_buffers=offload_buffers,
+ module_name=child_name,
+ preload_module_classes=preload_module_classes,
+ skip_keys=skip_keys,
+ tied_params_map=tied_params_map,
+ )
+
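+# Usage sketch (illustrative only): dispatching top-level blocks with `attach_align_device_hook_on_blocks`,
+# roughly what `dispatch_model` does under the hood. The device map is an assumption for the example and
+# maps submodule names to execution devices (e.g. {"block1": 0, "block2": 1} on two GPUs).
+#
+# import torch
+# import torch.nn as nn
+#
+# class TinyModel(nn.Module):
+#     def __init__(self):
+#         super().__init__()
+#         self.block1 = nn.Linear(8, 8)
+#         self.block2 = nn.Linear(8, 2)
+#
+#     def forward(self, x):
+#         return self.block2(self.block1(x))
+#
+# model = TinyModel()
+# execution_device = {"block1": "cpu", "block2": "cpu"}
+# offload = {name: False for name in execution_device}
+# attach_align_device_hook_on_blocks(model, execution_device=execution_device, offload=offload)
+# out = model(torch.randn(2, 8))  # inputs and outputs are aligned with each block's execution device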
+
+class CpuOffload(ModelHook):
+ """
+ Offloads a model to the CPU until its forward pass is called. The model is not offloaded back to the CPU after
+ the forward pass; the user needs to call the `init_hook` method again for that.
+
+ Args:
+ execution_device (`str`, `int` or `torch.device`, *optional*):
+ The device on which the model should be executed. Will default to the MPS device if it's available, then
+ GPU 0 if there is a GPU, and finally to the CPU.
+ prev_module_hook (`UserCpuOffloadHook`, *optional*):
+ The hook sent back by [`cpu_offload_with_hook`] for a previous model in the pipeline you are running. If
+ passed, its offload method will be called just before the forward of the model to which this hook is
+ attached.
+ """
+
+ def __init__(
+ self,
+ execution_device: Optional[Union[str, int, torch.device]] = None,
+ prev_module_hook: Optional["UserCpuOffloadHook"] = None,
+ ):
+ self.prev_module_hook = prev_module_hook
+
+ self.execution_device = execution_device if execution_device is not None else PartialState().default_device
+
+ def init_hook(self, module):
+ return module.to("cpu")
+
+ def pre_forward(self, module, *args, **kwargs):
+ if self.prev_module_hook is not None:
+ self.prev_module_hook.offload()
+ module.to(self.execution_device)
+ return send_to_device(args, self.execution_device), send_to_device(kwargs, self.execution_device)
+
+
+class UserCpuOffloadHook:
+ """
+ A simple hook grouping a model and a `ModelHook`, which provides easy APIs to call the init method of the hook
+ or remove it entirely.
+ """
+
+ def __init__(self, model, hook):
+ self.model = model
+ self.hook = hook
+
+ def offload(self):
+ self.hook.init_hook(self.model)
+
+ def remove(self):
+ remove_hook_from_module(self.model)
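+
+# Usage sketch (illustrative only): chaining `CpuOffload` hooks for a two-stage pipeline so that only one
+# model sits on the execution device at a time. This mirrors what `cpu_offload_with_hook` in
+# `accelerate.big_modeling` wires up, done here by hand with the classes defined above.
+#
+# import torch.nn as nn
+#
+# model_a, model_b = nn.Linear(8, 8), nn.Linear(8, 2)
+#
+# hook_a = CpuOffload()                              # `model_a` lives on CPU, moves to the default device at forward time
+# add_hook_to_module(model_a, hook_a, append=True)
+# user_hook_a = UserCpuOffloadHook(model_a, hook_a)
+#
+# hook_b = CpuOffload(prev_module_hook=user_hook_a)  # offloads `model_a` right before `model_b` runs
+# add_hook_to_module(model_b, hook_b, append=True)
+# user_hook_b = UserCpuOffloadHook(model_b, hook_b)
+#
+# # Run the pipeline as usual: model_a(x), then model_b(...); call user_hook_b.remove() when done.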
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/inference.py b/env-llmeval/lib/python3.10/site-packages/accelerate/inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf4cf15017938e34867d4eeaad120745051ab385
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/inference.py
@@ -0,0 +1,188 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import math
+from types import MethodType
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+from .state import PartialState
+from .utils import (
+ calculate_maximum_sizes,
+ convert_bytes,
+ copy_tensor_to_devices,
+ ignorant_find_batch_size,
+ infer_auto_device_map,
+ is_pippy_available,
+ pad_input_tensors,
+ send_to_device,
+)
+
+
+if is_pippy_available():
+ from pippy.IR import Pipe, PipeSplitWrapper, annotate_split_points
+ from pippy.PipelineStage import PipelineStage
+
+
+def generate_device_map(model, num_processes: int = 1, no_split_module_classes=None, max_memory: dict = None):
+ """
+ Calculates the device map for `model` with an offset for PiPPy
+ """
+ if num_processes == 1:
+ return infer_auto_device_map(model, no_split_module_classes=no_split_module_classes, clean_result=False)
+ if max_memory is None:
+ model_size, shared = calculate_maximum_sizes(model)
+
+ # Split into `n` chunks for each GPU
+ memory = (model_size + shared[0]) / num_processes
+ memory = convert_bytes(memory)
+ value, ending = memory.split(" ")
+
+ # Add a chunk to deal with potential extra shared memory instances
+ memory = math.ceil(float(value)) * 1.1
+ memory = f"{memory} {ending}"
+ max_memory = {i: memory for i in range(num_processes)}
+ device_map = infer_auto_device_map(
+ model,
+ max_memory=max_memory,
+ no_split_module_classes=no_split_module_classes,
+ clean_result=False,
+ )
+ return device_map
+
+
+def find_pippy_batch_size(args, kwargs):
+ found_batch_size = None
+ if args is not None:
+ for arg in args:
+ found_batch_size = ignorant_find_batch_size(arg)
+ if found_batch_size is not None:
+ break
+ if kwargs is not None and found_batch_size is None:
+ for kwarg in kwargs.values():
+ found_batch_size = ignorant_find_batch_size(kwarg)
+ if found_batch_size is not None:
+ break
+ return found_batch_size
+
+
+def build_pipeline(model, split_points, args, kwargs, num_chunks):
+ """
+ Attaches the split points to the model based on `self.device_map` and generates a `PipelineStage`. Requires passing
+ in the `args` and `kwargs` that the model needs, placed on the CPU.
+
+ Users can pass in a custom `num_chunks` as an optional hyper-parameter. By default it will use
+ `AcceleratorState.num_processes`.
+ """
+ # We need to annotate the split points in the model for PiPPy
+ state = PartialState()
+ annotate_split_points(model, {split_point: PipeSplitWrapper.SplitPoint.BEGINNING for split_point in split_points})
+ found_batch_size = find_pippy_batch_size(args, kwargs)
+ if found_batch_size != num_chunks:
+ if args is not None:
+ args = pad_input_tensors(args, found_batch_size, num_chunks)
+ if kwargs is not None:
+ kwargs = pad_input_tensors(kwargs, found_batch_size, num_chunks)
+ pipe = Pipe.from_tracing(model, num_chunks=num_chunks, example_args=args, example_kwargs=kwargs)
+ stage = PipelineStage(pipe, state.local_process_index, device=state.device)
+
+ return stage
+
+
+def pippy_forward(forward, num_chunks, gather_output, *args, **kwargs):
+ state = PartialState()
+ output = None
+
+ if state.num_processes == 1:
+ output = forward(*args, **kwargs)
+ elif state.is_local_main_process:
+ found_batch_size = find_pippy_batch_size(args, kwargs)
+ if found_batch_size is None:
+ raise ValueError("Could not find batch size from args or kwargs")
+ else:
+ if found_batch_size != num_chunks:
+ args = pad_input_tensors(args, found_batch_size, num_chunks)
+ kwargs = pad_input_tensors(kwargs, found_batch_size, num_chunks)
+ forward(*args, **kwargs)
+ elif state.is_last_process:
+ output = forward()
+ else:
+ forward()
+ if gather_output:
+ # Each node will get a copy of the full output which is only on the last GPU
+ output = copy_tensor_to_devices(output)
+ return output
+
+
+def prepare_pippy(
+ model,
+ split_points: Optional[Union[str, List[str]]] = "auto",
+ no_split_module_classes: Optional[List[str]] = None,
+ example_args: Optional[Tuple[Any]] = (),
+ example_kwargs: Optional[Dict[str, Any]] = None,
+ num_chunks: Optional[int] = None,
+ gather_output: Optional[bool] = False,
+):
+ """
+ Wraps `model` for pipeline parallel inference.
+
+ Args:
+ model (`torch.nn.Module`):
+ A model we want to split for pipeline-parallel inference
+ split_points (`str` or `List[str]`, defaults to 'auto'):
+ How to generate the split points and chunk the model across each GPU. 'auto' will find the best balanced
+ split given any model. Otherwise, it should be a list of layer names in the model to split at.
+ no_split_module_classes (`List[str]`):
+ A list of class names for layers we don't want to be split.
+ example_args (tuple of model inputs):
+ The expected inputs for the model that uses order-based inputs. Using this format is recommended if possible.
+ example_kwargs (dict of model inputs):
+ The expected inputs for the model that uses dictionary-based inputs. This is a *highly* limiting structure
+ that requires the same keys to be present at *all* inference calls. Not recommended unless the prior condition
+ is true for all cases.
+ num_chunks (`int`, defaults to the number of available GPUs):
+ The number of different stages the Pipeline will have. By default it will assign one chunk per GPU, but
+ this can be tuned and played with. In general one should have num_chunks >= num_gpus.
+ gather_output (`bool`, defaults to `False`):
+ If `True`, the output from the last GPU (which holds the true outputs) is sent across to all GPUs.
+ """
+ if not is_pippy_available():
+ raise ImportError(
+ "`pippy` was not found to be installed on your system. Please "
+ "install using `pip install torchpippy` or ensure you have at least version 0.2.0"
+ )
+ state = PartialState()
+ example_args = send_to_device(example_args, "cpu")
+ example_kwargs = send_to_device(example_kwargs, "cpu")
+ if num_chunks is None:
+ num_chunks = state.num_processes
+ if split_points == "auto":
+ device_map = generate_device_map(model, num_chunks, no_split_module_classes=no_split_module_classes)
+ split_points = []
+ for i in range(1, num_chunks):
+ split_points.append(next(k for k, v in device_map.items() if v == i))
+ model.hf_split_points = split_points
+ stage = build_pipeline(model, split_points, example_args, example_kwargs, num_chunks)
+ model._original_forward = model.forward
+ model._original_call = model.__call__
+ model.pippy_stage = stage
+ model.hf_split_points = split_points
+
+ def forward(*args, **kwargs):
+ return pippy_forward(stage.forward, num_chunks, gather_output, *args, **kwargs)
+
+ # To act like a decorator so that it can be popped when doing `extract_model_from_parallel`
+ # Note: creates an infinite recursion loop with `generate`
+ model_forward = MethodType(forward, model)
+ forward.__wrapped__ = model_forward
+ model.forward = forward
+ return model
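+
+# Usage sketch (illustrative only): wrapping a model for pipeline-parallel inference. `prepare_pippy` is
+# re-exported from the `accelerate` package root in versions that ship this module; the model name and
+# input shape below are assumptions, and the script is meant to be run with `accelerate launch` on
+# multiple GPUs.
+#
+# import torch
+# from transformers import AutoModelForCausalLM
+# from accelerate import prepare_pippy
+#
+# model = AutoModelForCausalLM.from_pretrained("gpt2")
+# example_inputs = torch.randint(0, model.config.vocab_size, (2, 16))
+# model = prepare_pippy(model, example_args=(example_inputs,), gather_output=True)
+# with torch.no_grad():
+#     output = model(example_inputs)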
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/memory_utils.py b/env-llmeval/lib/python3.10/site-packages/accelerate/memory_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa2e2c8b9d7d0064c3e5e282737a7ad6919bde29
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/memory_utils.py
@@ -0,0 +1,22 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import warnings
+
+
+warnings.warn(
+ "memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
+ "`from accelerate import find_executable_batch_size` to avoid this warning.",
+ FutureWarning,
+)
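+
+# Usage sketch (illustrative only): the import recommended by the warning above, with the decorator
+# pattern `find_executable_batch_size` is designed for. The decorated function is retried with a halved
+# `batch_size` whenever it raises an out-of-memory error; the body below is a placeholder.
+#
+# from accelerate import find_executable_batch_size
+#
+# @find_executable_batch_size(starting_batch_size=128)
+# def train(batch_size):
+#     ...  # build dataloaders and run training with `batch_size`
+#
+# train()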
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/__init__.py b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9cbe26c257b515f657c05e1996d517e69613972
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b72f4407f164314d39fae880191d36c842f8bad2
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_cli.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_cli.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..abceca9604e869533db033fc644e31c93b263af9
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_cli.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_distributed_data_loop.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_distributed_data_loop.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4ac44376728e0118dbf98b162c4946d24690a3fb
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_distributed_data_loop.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_notebook.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_notebook.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..261159b6484eb66a59b26588f362a006af0fb802
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_notebook.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_ops.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_ops.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..792c45aa7490cec817f4da7b042840152b8b9fce
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_ops.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_script.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_script.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5c8c4dc9a616b7eb391d8a245217c33c803e1535
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_script.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_sync.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_sync.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fe6abb7dad6be4d0f3bf606d770ea6c5e93a9b0f
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/__pycache__/test_sync.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__init__.py b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9cbe26c257b515f657c05e1996d517e69613972
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7301040dbbd74972e6ca6cff6a99c681742c6754
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_checkpointing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_checkpointing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5f5fd3fad4d424185f8090984ce8c6904d6d5be7
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_checkpointing.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_metrics.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_metrics.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..67c70d2d513ca9539bfa3b96ab320b1069ba956a
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_metrics.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_peak_memory_usage.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_peak_memory_usage.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d79d4a17ca826bbce2a94c376686f84f2bcd2d94
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_peak_memory_usage.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_performance.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_performance.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7e79a3e1e76a29585131ae549634618316a0a8c4
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_performance.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_pippy.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_pippy.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c024119491e322677312c137926070fa4b707d0c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_pippy.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_zero3_integration.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_zero3_integration.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0479cee5dfb6ab45d33a9a81472b2a1ab125c838
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/__pycache__/test_zero3_integration.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_checkpointing.py b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_checkpointing.py
new file mode 100644
index 0000000000000000000000000000000000000000..41c77c7ec5e6e2475a795efdb54702600eac0282
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_checkpointing.py
@@ -0,0 +1,268 @@
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+import json
+import os
+
+import evaluate
+import torch
+from datasets import load_dataset
+from torch.optim import AdamW
+from torch.utils.data import DataLoader
+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
+
+from accelerate import Accelerator, DistributedType
+from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
+
+
+MAX_GPU_BATCH_SIZE = 16
+EVAL_BATCH_SIZE = 32
+
+
+def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
+ """
+ Creates a set of `DataLoader`s for the `glue` dataset.
+
+ Args:
+ accelerator (`Accelerator`):
+ An `Accelerator` object
+ batch_size (`int`, *optional*):
+ The batch size for the train and validation DataLoaders.
+ model_name (`str`, *optional*):
+ The name or path of the pretrained model whose tokenizer is used.
+ """
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ datasets = load_dataset("glue", "mrpc")
+
+ def tokenize_function(examples):
+ # max_length=None => use the model max length (it's actually the default)
+ outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
+ return outputs
+
+ # Apply the method we just defined to all the examples in all the splits of the dataset
+ tokenized_datasets = datasets.map(
+ tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
+ )
+
+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
+ # transformers library
+ tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
+
+ def collate_fn(examples):
+ # On TPU it's best to pad everything to the same length or training will be very slow.
+ if accelerator.distributed_type == DistributedType.XLA:
+ return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
+ return tokenizer.pad(examples, padding="longest", return_tensors="pt")
+
+ # Instantiate dataloaders.
+ train_dataloader = DataLoader(
+ tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
+ )
+ eval_dataloader = DataLoader(
+ tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
+ )
+
+ return train_dataloader, eval_dataloader
+
+
+def evaluation_loop(accelerator, model, eval_dataloader, metric):
+ model.eval()
+ samples_seen = 0
+ for step, batch in enumerate(eval_dataloader):
+ # We could avoid this line since we set the accelerator with `device_placement=True`.
+ batch.to(accelerator.device)
+ with torch.no_grad():
+ outputs = model(**batch)
+ predictions = outputs.logits.argmax(dim=-1)
+ # It is slightly faster to call this once, than multiple times
+ predictions, references = accelerator.gather(
+ (predictions, batch["labels"])
+ ) # If we are in a multiprocess environment, the last batch has duplicates
+ if accelerator.use_distributed:
+ if step == len(eval_dataloader) - 1:
+ predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
+ references = references[: len(eval_dataloader.dataset) - samples_seen]
+ else:
+ samples_seen += references.shape[0]
+ metric.add_batch(
+ predictions=predictions,
+ references=references,
+ )
+
+ eval_metric = metric.compute()
+ return eval_metric["accuracy"]
+
+
+def training_function(config, args):
+ # Initialize accelerator
+ accelerator = Accelerator()
+
+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
+ lr = config["lr"]
+ num_epochs = int(config["num_epochs"])
+ seed = int(config["seed"])
+ batch_size = int(config["batch_size"])
+ model_name = args.model_name_or_path
+
+ set_seed(seed)
+ train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
+
+ # Instantiate the model (we build the model here so that the seed also controls the new weight initialization)
+ model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
+
+ # Instantiate optimizer
+ optimizer_cls = (
+ AdamW
+ if accelerator.state.deepspeed_plugin is None
+ or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
+ else DummyOptim
+ )
+ optimizer = optimizer_cls(params=model.parameters(), lr=lr)
+
+ if accelerator.state.deepspeed_plugin is not None:
+ gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
+ "gradient_accumulation_steps"
+ ]
+ else:
+ gradient_accumulation_steps = 1
+ max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
+
+ # Instantiate scheduler
+ if (
+ accelerator.state.deepspeed_plugin is None
+ or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
+ ):
+ lr_scheduler = get_linear_schedule_with_warmup(
+ optimizer=optimizer,
+ num_warmup_steps=0,
+ num_training_steps=max_training_steps,
+ )
+ else:
+ lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
+
+ # Prepare everything
+ # There is no specific order to remember; we just need to unpack the objects in the same order we gave them to the
+ # prepare method.
+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
+ )
+
+ # We need to keep track of how many total steps we have iterated over
+ overall_step = 0
+ # We also need to keep track of the starting epoch so files are named properly
+ starting_epoch = 0
+ metric = evaluate.load("glue", "mrpc")
+ ending_epoch = num_epochs
+
+ if args.partial_train_epoch is not None:
+ ending_epoch = args.partial_train_epoch
+
+ if args.resume_from_checkpoint:
+ accelerator.load_state(args.resume_from_checkpoint)
+ epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
+ state_epoch_num = ""
+ for char in epoch_string:
+ if char.isdigit():
+ state_epoch_num += char
+ else:
+ break
+ starting_epoch = int(state_epoch_num) + 1
+ accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
+ accelerator.print("resumed checkpoint performance:", accuracy)
+ accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
+ accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
+ with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json")) as f:
+ resumed_state = json.load(f)
+ assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
+ assert (
+ resumed_state["lr"] == lr_scheduler.get_lr()[0]
+ ), "Scheduler learning rate mismatch, loading from checkpoint failed"
+ assert (
+ resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
+ ), "Optimizer learning rate mismatch, loading from checkpoint failed"
+ assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
+ return
+
+ # Now we train the model
+ state = {}
+ for epoch in range(starting_epoch, ending_epoch):
+ model.train()
+ for step, batch in enumerate(train_dataloader):
+ outputs = model(**batch)
+ loss = outputs.loss
+ loss = loss / gradient_accumulation_steps
+ accelerator.backward(loss)
+ if step % gradient_accumulation_steps == 0:
+ optimizer.step()
+ lr_scheduler.step()
+ optimizer.zero_grad()
+
+ overall_step += 1
+ output_dir = f"epoch_{epoch}"
+ output_dir = os.path.join(args.output_dir, output_dir)
+ accelerator.save_state(output_dir)
+ accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
+ state["accuracy"] = accuracy
+ state["lr"] = lr_scheduler.get_lr()[0]
+ state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
+ state["epoch"] = epoch
+ state["step"] = overall_step
+ accelerator.print(f"epoch {epoch}:", state)
+
+ accelerator.wait_for_everyone()
+ if accelerator.is_main_process:
+ with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
+ json.dump(state, f)
+
+
+def main():
+ parser = argparse.ArgumentParser(description="Simple example of a training script with checkpointing.")
+ parser.add_argument(
+ "--model_name_or_path",
+ type=str,
+ default="bert-base-cased",
+ help="Path to pretrained model or model identifier from huggingface.co/models.",
+ required=False,
+ )
+ parser.add_argument(
+ "--output_dir",
+ type=str,
+ default=".",
+ help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
+ )
+ parser.add_argument(
+ "--resume_from_checkpoint",
+ type=str,
+ default=None,
+ help="If the training should continue from a checkpoint folder.",
+ )
+ parser.add_argument(
+ "--partial_train_epoch",
+ type=int,
+ default=None,
+ help="If passed, the training will stop after this number of epochs.",
+ )
+ parser.add_argument(
+ "--num_epochs",
+ type=int,
+ default=2,
+ help="Number of train epochs.",
+ )
+ args = parser.parse_args()
+ config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
+
+ training_function(config, args)
+
+
+if __name__ == "__main__":
+ main()
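+
+# Usage sketch (illustrative only): how a script like this is typically launched with the Accelerate CLI.
+# The paths and flag values are assumptions; `--partial_train_epoch` and `--resume_from_checkpoint` are
+# the arguments defined above.
+#
+#   accelerate launch test_checkpointing.py --output_dir ./ckpts --partial_train_epoch 1
+#   accelerate launch test_checkpointing.py --output_dir ./ckpts --resume_from_checkpoint ./ckpts/epoch_0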
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_metrics.py b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_metrics.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ac13aba6266f31f1e0f9eb41b961fc2933d00ab
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_metrics.py
@@ -0,0 +1,306 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import math
+import os
+from copy import deepcopy
+
+import datasets
+import evaluate
+import torch
+import transformers
+from datasets import load_dataset
+from torch.utils.data import DataLoader, IterableDataset
+from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
+from accelerate import Accelerator, DataLoaderConfiguration, DistributedType
+from accelerate.data_loader import DataLoaderDispatcher
+from accelerate.test_utils import RegressionDataset, RegressionModel, torch_device
+from accelerate.utils import is_torch_xla_available, set_seed
+
+
+os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
+
+
+class ListHandler(logging.Handler):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.logs = []
+
+ def emit(self, record):
+ self.logs.append(record)
+
+
+def get_basic_setup(accelerator, num_samples=82, batch_size=16):
+ "Returns everything needed to perform basic training"
+ set_seed(42)
+ model = RegressionModel()
+ ddp_model = deepcopy(model)
+ dset = RegressionDataset(length=num_samples)
+ dataloader = DataLoader(dset, batch_size=batch_size)
+ model.to(accelerator.device)
+ ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
+ return model, ddp_model, dataloader
+
+
+def get_dataloader(accelerator: Accelerator, use_longest=False):
+ tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
+ dataset = load_dataset("glue", "mrpc", split="validation")
+
+ def tokenize_function(examples):
+ outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
+ return outputs
+
+ with accelerator.main_process_first():
+ tokenized_datasets = dataset.map(
+ tokenize_function,
+ batched=True,
+ remove_columns=["idx", "sentence1", "sentence2"],
+ )
+
+ tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
+
+ def collate_fn(examples):
+ if use_longest:
+ return tokenizer.pad(examples, padding="longest", return_tensors="pt")
+ return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
+
+ return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
+
+
+def get_mrpc_setup(dispatch_batches, split_batches):
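+ # `dispatch_batches` makes the main process iterate the dataloader and dispatch slices of each
+ # batch to the other processes; `split_batches` splits every fetched batch across processes
+ # instead of giving each process its own full batch.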
+ dataloader_config = DataLoaderConfiguration(dispatch_batches=dispatch_batches, split_batches=split_batches)
+ accelerator = Accelerator(dataloader_config=dataloader_config)
+ dataloader = get_dataloader(accelerator, not dispatch_batches)
+ model = AutoModelForSequenceClassification.from_pretrained(
+ "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
+ )
+ ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
+ return {
+ "ddp": [ddp_model, ddp_dataloader, torch_device],
+ "no": [model, dataloader, accelerator.device],
+ }, accelerator
+
+
+def generate_predictions(model, dataloader, accelerator):
+ logits_and_targets = []
+ for batch in dataloader:
+ input, target = batch.values()
+ with torch.no_grad():
+ logit = model(input)
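+ # `gather_for_metrics` gathers from all processes and drops the duplicate samples that were
+ # added to pad out the last batch, so each sample is counted exactly once.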
+ logit, target = accelerator.gather_for_metrics((logit, target))
+ logits_and_targets.append((logit, target))
+ logits, targs = [], []
+ for logit, targ in logits_and_targets:
+ logits.append(logit)
+ targs.append(targ)
+ logits, targs = torch.cat(logits), torch.cat(targs)
+ return logits, targs
+
+
+def test_torch_metrics(
+ accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
+):
+ _, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
+ logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
+ assert (
+ len(logits) == num_samples
+ ), f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(logits)}"
+
+
+def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
+ metric = evaluate.load("glue", "mrpc")
+ setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
+ # First do baseline
+ model, dataloader, device = setup["no"]
+ model.to(device)
+ model.eval()
+ for batch in dataloader:
+ batch.to(device)
+ with torch.inference_mode():
+ outputs = model(**batch)
+ preds = outputs.logits.argmax(dim=-1)
+ metric.add_batch(predictions=preds, references=batch["labels"])
+ baseline = metric.compute()
+
+ # Then do distributed
+ model, dataloader, device = setup["ddp"]
+ model.eval()
+ for batch in dataloader:
+ with torch.inference_mode():
+ outputs = model(**batch)
+ preds = outputs.logits.argmax(dim=-1)
+ references = batch["labels"]
+ preds, references = accelerator.gather_for_metrics((preds, references))
+ metric.add_batch(predictions=preds, references=references)
+ distributed = metric.compute()
+
+ for key in "accuracy f1".split():
+ assert math.isclose(
+ baseline[key], distributed[key]
+ ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
+
+
+def test_gather_for_metrics_with_non_tensor_objects_iterable_dataset():
+ class DummyIterableDataset(IterableDataset):
+ def __init__(self, data):
+ self.data = data
+
+ def __len__(self):
+ return len(self.data)
+
+ def __iter__(self):
+ yield from self.data
+
+ iterable_dataset = DummyIterableDataset(list(range(30)))
+ dataloader = DataLoader(iterable_dataset, batch_size=4)
+ accelerator = Accelerator()
+ prepared_dataloader = accelerator.prepare(dataloader)
+
+ if accelerator.is_main_process:
+ logger = logging.root.manager.loggerDict["accelerate.accelerator"]
+ list_handler = ListHandler()
+ logger.addHandler(list_handler)
+
+ batches_for_metrics = []
+ for batch in prepared_dataloader:
+ batches_for_metrics.append(accelerator.gather_for_metrics(batch))
+
+ assert torch.cat(batches_for_metrics).size(0) == 30
+
+ if accelerator.is_main_process:
+ assert len(list_handler.logs) == 0
+ logger.removeHandler(list_handler)
+
+
+def test_gather_for_metrics_with_iterable_dataset():
+ class DummyIterableDataset(IterableDataset):
+ def __init__(self, data):
+ self.data = data
+
+ def __len__(self):
+ return len(self.data)
+
+ def __iter__(self):
+ yield from self.data
+
+ iterable_dataset = DummyIterableDataset(torch.as_tensor(range(30)))
+ dataloader = DataLoader(iterable_dataset, batch_size=4)
+
+ accelerator = Accelerator()
+ prepared_dataloader = accelerator.prepare(dataloader)
+
+ assert isinstance(prepared_dataloader, DataLoaderDispatcher)
+
+ if accelerator.is_main_process:
+ logger = logging.root.manager.loggerDict["accelerate.accelerator"]
+ list_handler = ListHandler()
+ logger.addHandler(list_handler)
+
+ batches_for_metrics = []
+ for batch in prepared_dataloader:
+ batches_for_metrics.append(accelerator.gather_for_metrics(batch))
+
+ assert torch.cat(batches_for_metrics).size(0) == 30
+
+ if accelerator.is_main_process:
+ assert len(list_handler.logs) == 0
+
+ logger.removeHandler(list_handler)
+
+
+def test_gather_for_metrics_drop_last():
+ accelerator = Accelerator()
+ per_device_batch_size = 5
+ num_items = (10 * accelerator.num_processes) + 1
+ dataloader = DataLoader(range(num_items), batch_size=per_device_batch_size, drop_last=True)
+ dataloader = accelerator.prepare(dataloader)
+
+ iterator = iter(dataloader)
+ next(iterator) # Skip first batch tensor([0, 1, 2, 3, 4], device='cuda:0')
+ batch = next(iterator)
+ gathered_items = accelerator.gather_for_metrics(batch)
+
+ # Should return a full set of complete batches from each GPU
+ num_expected_items = per_device_batch_size * accelerator.num_processes
+ assert gathered_items.size(0) == (
+ num_expected_items
+ ), f"Expected number of items: {num_expected_items}, Actual: {gathered_items.size(0)}"
+
+
+def main():
+ dataloader_config = DataLoaderConfiguration(split_batches=False, dispatch_batches=False)
+ accelerator = Accelerator(dataloader_config=dataloader_config)
+ if accelerator.is_local_main_process:
+ datasets.utils.logging.set_verbosity_warning()
+ transformers.utils.logging.set_verbosity_warning()
+ else:
+ datasets.utils.logging.set_verbosity_error()
+ transformers.utils.logging.set_verbosity_error()
+ # TorchXLA does not support batch dispatching. 'put_on_device' is always False for
+ # TorchXLA, which can cause a ValueError in the 'prepare_data_loader' function.
+ dispatch_batches_options = [False] if accelerator.state.distributed_type == DistributedType.XLA else [True, False]
+
+ # Temporarily disable this test for TorchXLA due to the 'Cannot set version_counter for
+ # inference tensor' error in inference mode. Re-enable it once TorchXLA fixes this bug.
+ # These are a bit slower, so they should only be run on the GPU or TPU.
+ if accelerator.device.type != "cpu" and not is_torch_xla_available():
+ if accelerator.is_local_main_process:
+ print("**Testing gather_for_metrics**")
+ for split_batches in [True, False]:
+ for dispatch_batches in dispatch_batches_options:
+ if accelerator.is_local_main_process:
+ print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
+ test_mrpc(dispatch_batches, split_batches)
+ accelerator.state._reset_state()
+ print("test_gather_for_metrics_with_iterable_dataset")
+ test_gather_for_metrics_with_iterable_dataset()
+ print("test gather_for_metrics_with_non_tensor_objects_iterable_dataset")
+ test_gather_for_metrics_with_non_tensor_objects_iterable_dataset()
+
+ # MpDeviceLoader in TorchXLA is an asynchronous loader that preloads several batches into cache.
+ # This can cause the 'end_of_dataloader' of DataLoaderStateMixin to be set earlier than intended.
+ # Skip this test when TorchXLA is enabled.
+ if accelerator.state.distributed_type != DistributedType.XLA:
+ if accelerator.is_local_main_process:
+ print("**Test torch metrics**")
+ for split_batches in [True, False]:
+ for dispatch_batches in dispatch_batches_options:
+ dataloader_config = DataLoaderConfiguration(
+ split_batches=split_batches, dispatch_batches=dispatch_batches
+ )
+ accelerator = Accelerator(dataloader_config=dataloader_config)
+ if accelerator.is_local_main_process:
+ print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
+ test_torch_metrics(accelerator, 99)
+ accelerator.state._reset_state()
+ if accelerator.is_local_main_process:
+ print("**Test last batch is not dropped when perfectly divisible**")
+ accelerator = Accelerator()
+ test_torch_metrics(accelerator, 512)
+ accelerator.state._reset_state()
+ if accelerator.is_local_main_process:
+ print("**Test that `drop_last` is taken into account**")
+ test_gather_for_metrics_drop_last()
+ accelerator.state._reset_state()
+
+
+def _mp_fn(index):
+ # For xla_spawn (TPUs)
+ main()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py
new file mode 100644
index 0000000000000000000000000000000000000000..aeb55f6c87d7831ed5d7f370a4b9d7810777bd3e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py
@@ -0,0 +1,282 @@
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+import gc
+import json
+import os
+
+import torch
+from datasets import load_dataset
+from torch.optim import AdamW
+from torch.utils.data import DataLoader
+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
+
+from accelerate import Accelerator, DistributedType
+from accelerate.utils import is_mlu_available, is_npu_available, is_xpu_available
+from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
+
+
+MAX_GPU_BATCH_SIZE = 16
+EVAL_BATCH_SIZE = 32
+
+
+# Converting Bytes to Megabytes
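+# For example, b2mb(3 * 2**20) == 3.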
+def b2mb(x):
+ return int(x / 2**20)
+
+
+# This context manager is used to track the peak memory usage of the process
+class TorchTracemalloc:
+ def __enter__(self):
+ gc.collect()
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+ torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
+ self.begin = torch.cuda.memory_allocated()
+ elif is_mlu_available():
+ torch.mlu.empty_cache()
+ torch.mlu.reset_max_memory_allocated() # reset the peak gauge to zero
+ self.begin = torch.mlu.memory_allocated()
+ elif is_npu_available():
+ torch.npu.empty_cache()
+ torch.npu.reset_max_memory_allocated() # reset the peak gauge to zero
+ self.begin = torch.npu.memory_allocated()
+ elif is_xpu_available():
+ torch.xpu.empty_cache()
+ torch.xpu.reset_max_memory_allocated() # reset the peak gauge to zero
+ self.begin = torch.xpu.memory_allocated()
+ return self
+
+ def __exit__(self, *exc):
+ gc.collect()
+ if torch.cuda.is_available():
+ torch.cuda.empty_cache()
+ self.end = torch.cuda.memory_allocated()
+ self.peak = torch.cuda.max_memory_allocated()
+ elif is_mlu_available():
+ torch.mlu.empty_cache()
+ self.end = torch.mlu.memory_allocated()
+ self.peak = torch.mlu.max_memory_allocated()
+ elif is_npu_available():
+ torch.npu.empty_cache()
+ self.end = torch.npu.memory_allocated()
+ self.peak = torch.npu.max_memory_allocated()
+ elif is_xpu_available():
+ torch.xpu.empty_cache()
+ self.end = torch.xpu.memory_allocated()
+ self.peak = torch.xpu.max_memory_allocated()
+ self.used = b2mb(self.end - self.begin)
+ self.peaked = b2mb(self.peak - self.begin)
+ # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
+
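+# Illustrative usage sketch (not part of the test flow; `run_training_step` is a hypothetical helper):
+#
+#     with TorchTracemalloc() as tracemalloc:
+#         run_training_step()
+#     print(tracemalloc.used, tracemalloc.peaked)  # MB consumed / MB peaked above the starting allocation
+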
+
+def get_dataloaders(
+ accelerator: Accelerator,
+ batch_size: int = 16,
+ model_name: str = "bert-base-cased",
+ n_train: int = 320,
+ n_val: int = 160,
+):
+ """
+ Creates a set of `DataLoader`s for the `glue` dataset.
+
+ Args:
+ accelerator (`Accelerator`):
+ An `Accelerator` object
+ batch_size (`int`, *optional*):
+ The batch size for the train and validation DataLoaders.
+ model_name (`str`, *optional*):
+ The name of the model to use.
+ n_train (`int`, *optional*):
+ The number of training examples to use.
+ n_val (`int`, *optional*):
+ The number of validation examples to use.
+ """
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ datasets = load_dataset(
+ "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
+ )
+
+ def tokenize_function(examples):
+ # max_length=None => use the model max length (it's actually the default)
+ outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
+ return outputs
+
+ # Apply the method we just defined to all the examples in all the splits of the dataset
+ tokenized_datasets = datasets.map(
+ tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
+ )
+
+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
+ # transformers library
+ tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
+
+ def collate_fn(examples):
+ # On TPU it's best to pad everything to the same length or training will be very slow.
+ if accelerator.distributed_type == DistributedType.XLA:
+ return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
+ return tokenizer.pad(examples, padding="longest", return_tensors="pt")
+
+ # Instantiate dataloaders.
+ train_dataloader = DataLoader(
+ tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
+ )
+ eval_dataloader = DataLoader(
+ tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
+ )
+
+ return train_dataloader, eval_dataloader
+
+
+def training_function(config, args):
+ # Initialize accelerator
+ accelerator = Accelerator()
+
+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
+ lr = config["lr"]
+ num_epochs = int(config["num_epochs"])
+ seed = int(config["seed"])
+ batch_size = int(config["batch_size"])
+ model_name = args.model_name_or_path
+
+ set_seed(seed)
+ train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)
+
+ # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
+ model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
+
+ # Instantiate optimizer
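+ # When the DeepSpeed config already specifies an optimizer, Accelerate expects the
+ # `DummyOptim` placeholder so DeepSpeed can build the real optimizer itself.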
+ optimizer_cls = (
+ AdamW
+ if accelerator.state.deepspeed_plugin is None
+ or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
+ else DummyOptim
+ )
+ optimizer = optimizer_cls(params=model.parameters(), lr=lr)
+
+ if accelerator.state.deepspeed_plugin is not None:
+ gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
+ "gradient_accumulation_steps"
+ ]
+ else:
+ gradient_accumulation_steps = 1
+ max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
+
+ # Instantiate scheduler
+ if (
+ accelerator.state.deepspeed_plugin is None
+ or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
+ ):
+ lr_scheduler = get_linear_schedule_with_warmup(
+ optimizer=optimizer,
+ num_warmup_steps=0,
+ num_training_steps=max_training_steps,
+ )
+ else:
+ lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
+
+ # Prepare everything
+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
+ # prepare method.
+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
+ )
+
+ # We need to keep track of how many total steps we have iterated over
+ overall_step = 0
+ # We also need to keep track of the starting epoch so files are named properly
+ starting_epoch = 0
+
+ # Now we train the model
+ train_total_peak_memory = {}
+ for epoch in range(starting_epoch, num_epochs):
+ with TorchTracemalloc() as tracemalloc:
+ model.train()
+ for step, batch in enumerate(train_dataloader):
+ outputs = model(**batch)
+ loss = outputs.loss
+ loss = loss / gradient_accumulation_steps
+ accelerator.backward(loss)
+ if step % gradient_accumulation_steps == 0:
+ optimizer.step()
+ lr_scheduler.step()
+ optimizer.zero_grad()
+
+ overall_step += 1
+
+ # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
+ accelerator.print(f"Memory before entering the train: {b2mb(tracemalloc.begin)}")
+ accelerator.print(f"Memory consumed at the end of the train (end-begin): {tracemalloc.used}")
+ accelerator.print(f"Peak Memory consumed during the train (max-begin): {tracemalloc.peaked}")
+ accelerator.print(
+ f"Total Peak Memory consumed during the train (max): {tracemalloc.peaked + b2mb(tracemalloc.begin)}"
+ )
+ train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
+ if args.peak_memory_upper_bound is not None:
+ assert (
+ train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
+ ), "Peak memory usage exceeded the upper bound"
+
+ accelerator.wait_for_everyone()
+ if accelerator.is_main_process:
+ with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
+ json.dump(train_total_peak_memory, f)
+
+
+def main():
+ parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
+ parser.add_argument(
+ "--model_name_or_path",
+ type=str,
+ default="bert-base-cased",
+ help="Path to pretrained model or model identifier from huggingface.co/models.",
+ required=False,
+ )
+ parser.add_argument(
+ "--output_dir",
+ type=str,
+ default=".",
+ help="Optional save directory where the peak memory usage JSON file will be stored. Default is the current working directory.",
+ )
+ parser.add_argument(
+ "--peak_memory_upper_bound",
+ type=float,
+ default=None,
+ help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
+ )
+ parser.add_argument(
+ "--n_train",
+ type=int,
+ default=320,
+ help="Number of training examples to use.",
+ )
+ parser.add_argument(
+ "--n_val",
+ type=int,
+ default=160,
+ help="Number of validation examples to use.",
+ )
+ parser.add_argument(
+ "--num_epochs",
+ type=int,
+ default=1,
+ help="Number of train epochs.",
+ )
+ args = parser.parse_args()
+ config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
+ training_function(config, args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_performance.py b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_performance.py
new file mode 100644
index 0000000000000000000000000000000000000000..7051859aa74bbac5b15e4465395b8177e3dd1d27
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_performance.py
@@ -0,0 +1,243 @@
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+import json
+import os
+
+import evaluate
+import torch
+from datasets import load_dataset
+from torch.optim import AdamW
+from torch.utils.data import DataLoader
+from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
+
+from accelerate import Accelerator, DistributedType
+from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
+
+
+MAX_GPU_BATCH_SIZE = 16
+EVAL_BATCH_SIZE = 32
+
+
+def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
+ """
+ Creates a set of `DataLoader`s for the `glue` dataset.
+
+ Args:
+ accelerator (`Accelerator`):
+ An `Accelerator` object
+ batch_size (`int`, *optional*):
+ The batch size for the train and validation DataLoaders.
+ model_name (`str`, *optional*):
+ The name of the model to use.
+ """
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ datasets = load_dataset("glue", "mrpc")
+
+ def tokenize_function(examples):
+ # max_length=None => use the model max length (it's actually the default)
+ outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
+ return outputs
+
+ # Apply the method we just defined to all the examples in all the splits of the dataset
+ tokenized_datasets = datasets.map(
+ tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
+ )
+
+ # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
+ # transformers library
+ tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
+
+ def collate_fn(examples):
+ # On TPU it's best to pad everything to the same length or training will be very slow.
+ if accelerator.distributed_type == DistributedType.XLA:
+ return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
+ return tokenizer.pad(examples, padding="longest", return_tensors="pt")
+
+ # Instantiate dataloaders.
+ train_dataloader = DataLoader(
+ tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
+ )
+ eval_dataloader = DataLoader(
+ tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
+ )
+
+ return train_dataloader, eval_dataloader
+
+
+def training_function(config, args):
+ # Initialize accelerator
+ accelerator = Accelerator()
+
+ # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
+ lr = config["lr"]
+ num_epochs = int(config["num_epochs"])
+ seed = int(config["seed"])
+ batch_size = int(config["batch_size"])
+ model_name = args.model_name_or_path
+
+ set_seed(seed)
+ train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
+
+ # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
+ model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
+
+ # Instantiate optimizer
+ optimizer_cls = (
+ AdamW
+ if accelerator.state.deepspeed_plugin is None
+ or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
+ else DummyOptim
+ )
+ optimizer = optimizer_cls(params=model.parameters(), lr=lr)
+
+ max_training_steps = len(train_dataloader) * num_epochs
+
+ # Instantiate scheduler
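+ # `DummyScheduler` is a placeholder used when the scheduler is defined in the DeepSpeed
+ # config; DeepSpeed then builds and steps the real scheduler itself.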
+ linear_decay_scheduler = False
+ if (
+ accelerator.state.deepspeed_plugin is None
+ or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
+ ):
+ lr_scheduler = get_linear_schedule_with_warmup(
+ optimizer=optimizer,
+ num_warmup_steps=0,
+ num_training_steps=max_training_steps,
+ )
+ linear_decay_scheduler = True
+ else:
+ lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
+
+ # Prepare everything
+ # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
+ # prepare method.
+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
+ model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
+ )
+
+ # We also need to keep track of the starting epoch so files are named properly
+ starting_epoch = 0
+
+ # Now we train the model
+ metric = evaluate.load("glue", "mrpc")
+ best_performance = 0
+ performance_metric = {}
+ expected_lr_after_first_optim_step = lr * (
+ 1 - 1 / (max_training_steps / accelerator.num_processes / accelerator.gradient_accumulation_steps)
+ )
+ lr_scheduler_check_completed = False
+ for epoch in range(starting_epoch, num_epochs):
+ model.train()
+ for step, batch in enumerate(train_dataloader):
+ with accelerator.accumulate(model):
+ outputs = model(**batch)
+ loss = outputs.loss
+ accelerator.backward(loss)
+ optimizer.step()
+ lr_scheduler.step()
+ optimizer.zero_grad()
+
+ # assert the learning rate after first optimizer step
+ if (
+ accelerator.sync_gradients
+ and not lr_scheduler_check_completed
+ and linear_decay_scheduler
+ and accelerator.state.mixed_precision == "no"
+ ):
+ assert (
+ lr_scheduler.get_last_lr()[0] == expected_lr_after_first_optim_step
+ ), f"Wrong lr found at second step, expected {expected_lr_after_first_optim_step}, got {lr_scheduler.get_last_lr()[0]}"
+ lr_scheduler_check_completed = True
+
+ model.eval()
+ samples_seen = 0
+ for step, batch in enumerate(eval_dataloader):
+ # We could avoid this line since we set the accelerator with `device_placement=True`.
+ batch.to(accelerator.device)
+ with torch.no_grad():
+ outputs = model(**batch)
+ predictions = outputs.logits.argmax(dim=-1)
+ # It is slightly faster to call this once than multiple times
+ predictions, references = accelerator.gather(
+ (predictions, batch["labels"])
+ ) # If we are in a multiprocess environment, the last batch has duplicates
+ if accelerator.use_distributed:
+ if step == len(eval_dataloader) - 1:
+ predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
+ references = references[: len(eval_dataloader.dataset) - samples_seen]
+ else:
+ samples_seen += references.shape[0]
+ metric.add_batch(
+ predictions=predictions,
+ references=references,
+ )
+
+ eval_metric = metric.compute()
+ # Use accelerator.print to print only on the main process.
+ accelerator.print(f"epoch {epoch}:", eval_metric)
+ performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]
+
+ if best_performance < eval_metric["accuracy"]:
+ best_performance = eval_metric["accuracy"]
+
+ # check that the LR is 0
+ if linear_decay_scheduler and accelerator.state.mixed_precision == "no":
+ assert (
+ lr_scheduler.get_last_lr()[0] == 0
+ ), f"Wrong lr found at last step, expected 0, got {lr_scheduler.get_last_lr()[0]}"
+
+ if args.performance_lower_bound is not None:
+ assert (
+ args.performance_lower_bound <= best_performance
+ ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"
+
+ accelerator.wait_for_everyone()
+ if accelerator.is_main_process:
+ with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
+ json.dump(performance_metric, f)
+
+
+def main():
+ parser = argparse.ArgumentParser(description="Simple example of training script tracking model performance.")
+ parser.add_argument(
+ "--model_name_or_path",
+ type=str,
+ default="bert-base-cased",
+ help="Path to pretrained model or model identifier from huggingface.co/models.",
+ required=False,
+ )
+ parser.add_argument(
+ "--output_dir",
+ type=str,
+ default=".",
+ help="Optional save directory where the results JSON file will be stored. Default is the current working directory.",
+ )
+ parser.add_argument(
+ "--performance_lower_bound",
+ type=float,
+ default=None,
+ help="Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.",
+ )
+ parser.add_argument(
+ "--num_epochs",
+ type=int,
+ default=3,
+ help="Number of train epochs.",
+ )
+ args = parser.parse_args()
+ config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
+ training_function(config, args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_pippy.py b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_pippy.py
new file mode 100644
index 0000000000000000000000000000000000000000..f589365649d56fd690b4f4104a8838f885183527
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_pippy.py
@@ -0,0 +1,129 @@
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import torch
+from torchvision.models import resnet34
+from transformers import (
+ BertConfig,
+ BertForMaskedLM,
+ GPT2Config,
+ GPT2ForSequenceClassification,
+ T5Config,
+ T5ForConditionalGeneration,
+)
+
+from accelerate import PartialState
+from accelerate.inference import prepare_pippy
+from accelerate.utils import DistributedType, send_to_device, set_seed
+
+
+model_to_config = {
+ "t5": (T5ForConditionalGeneration, T5Config, 1024),
+ "bert": (BertForMaskedLM, BertConfig, 512),
+ "gpt2": (GPT2ForSequenceClassification, GPT2Config, 1024),
+}
+
+
+def get_model_and_data_for_text(model_name, device, num_processes: int = 2):
+ initializer, config, seq_len = model_to_config[model_name]
+ config_args = {}
+ # Eventually needed for batch inference tests on gpt-2 when bs != 1
+ # if model_name == "gpt2":
+ # config_args["pad_token_id"] = 0
+ model_config = config(**config_args)
+ model = initializer(model_config)
+ return model, torch.randint(
+ low=0,
+ high=model_config.vocab_size,
+ size=(num_processes, seq_len),
+ device=device,
+ dtype=torch.int64,
+ requires_grad=False,
+ )
+
+
+def test_gpt2(batch_size: int = 2):
+ set_seed(42)
+ state = PartialState()
+ model, inputs = get_model_and_data_for_text("gpt2", "cpu", batch_size)
+ model = prepare_pippy(model, example_args=(inputs,), no_split_module_classes=model._no_split_modules)
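+ # `prepare_pippy` traces the model with the example inputs and splits it into pipeline
+ # stages, one per available device, so each process only runs its own stage.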
+ # For inference, args need to be a tuple
+ inputs = inputs.to("cuda")
+ with torch.no_grad():
+ output = model(inputs)
+ # Zach: Check that we just grab the real outputs we need at the end
+ if not state.is_last_process:
+ assert output is None, "Output was not generated on just the last process!"
+ else:
+ assert output is not None, "Output was not generated in the last process!"
+
+
+def test_t5(batch_size: int = 2):
+ set_seed(42)
+ state = PartialState()
+ model, inputs = get_model_and_data_for_text("t5", "cpu", batch_size)
+ example_inputs = {"input_ids": inputs, "decoder_input_ids": inputs}
+ model = prepare_pippy(
+ model,
+ no_split_module_classes=model._no_split_modules,
+ example_kwargs=example_inputs,
+ )
+ # For inference, args need to be a tuple
+ inputs = send_to_device(example_inputs, "cuda:0")
+ with torch.no_grad():
+ output = model(*inputs.values())
+ # Zach: Check that we just grab the real outputs we need at the end
+ if not state.is_last_process:
+ assert output is None, "Output was not generated on just the last process!"
+ else:
+ assert output is not None, "Output was not generated in the last process!"
+
+
+def test_resnet(batch_size: int = 2):
+ set_seed(42)
+ state = PartialState()
+ model = resnet34()
+ input_tensor = torch.rand(batch_size, 3, 224, 224)
+ model = prepare_pippy(
+ model,
+ example_args=(input_tensor,),
+ )
+ inputs = send_to_device(input_tensor, "cuda:0")
+ with torch.no_grad():
+ output = model(inputs)
+ # Zach: Check that we just grab the real outputs we need at the end
+ if not state.is_last_process:
+ assert output is None, "Output was not generated on just the last process!"
+ else:
+ assert output is not None, "Output was not generated in the last process!"
+
+
+if __name__ == "__main__":
+ state = PartialState()
+ state.print("Testing pippy integration...")
+ if state.distributed_type == DistributedType.MULTI_GPU:
+ state.print("Testing GPT2...")
+ test_gpt2()
+ # Issue: When modifying the tokenizer for batch GPT2 inference, there's an issue
+ # due to references
+ # NameError: cannot access free variable 'chunk_args_list' where it is not associated with a value in enclosing scope
+ # test_gpt2(3)
+ state.print("Testing T5...")
+ test_t5()
+ test_t5(1)
+ test_t5(3)
+ state.print("Testing CV model...")
+ test_resnet()
+ test_resnet(3)
+ else:
+ print("Less than two GPUs found, not running tests!")
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_zero3_integration.py b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_zero3_integration.py
new file mode 100644
index 0000000000000000000000000000000000000000..67e78a7d37c0b82113e1cdbb3e76987b24c8494f
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/external_deps/test_zero3_integration.py
@@ -0,0 +1,52 @@
+# Copyright 2024 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch.distributed
+
+from accelerate.test_utils import require_huggingface_suite
+from accelerate.utils import is_transformers_available
+
+
+if is_transformers_available():
+ from transformers import AutoModel, TrainingArguments
+
+
+GPT2_TINY = "sshleifer/tiny-gpt2"
+
+
+@require_huggingface_suite
+def init_torch_dist_then_launch_deepspeed():
+ torch.distributed.init_process_group(backend="nccl")
+ deepspeed_config = {
+ "zero_optimization": {
+ "stage": 3,
+ },
+ "train_batch_size": "auto",
+ "train_micro_batch_size_per_gpu": "auto",
+ }
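+ # The "auto" values are resolved by the Transformers Trainer integration from
+ # `TrainingArguments`, so batch sizes do not need to be hard-coded here.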
+ train_args = TrainingArguments(
+ output_dir="./",
+ deepspeed=deepspeed_config,
+ )
+ model = AutoModel.from_pretrained(GPT2_TINY)
+ assert train_args is not None
+ assert model is not None
+
+
+def main():
+ init_torch_dist_then_launch_deepspeed()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_notebook.py b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_notebook.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0c073ac3eae5a86c351a5f8232b84bcdfb920a8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_notebook.py
@@ -0,0 +1,56 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Test file to ensure that certain situational setups for notebooks work in general.
+"""
+
+import os
+
+from pytest import raises
+
+from accelerate import PartialState, notebook_launcher
+from accelerate.test_utils import require_bnb
+from accelerate.utils import is_bnb_available
+
+
+def basic_function():
+ # Just prints the PartialState
+ print(f"PartialState:\n{PartialState()}")
+
+
+NUM_PROCESSES = int(os.environ.get("ACCELERATE_NUM_PROCESSES", 1))
+
+
+def test_can_initialize():
+ notebook_launcher(basic_function, (), num_processes=NUM_PROCESSES)
+
+
+@require_bnb
+def test_problematic_imports():
+ with raises(RuntimeError, match="Please keep these imports"):
+ import bitsandbytes as bnb # noqa: F401
+
+ notebook_launcher(basic_function, (), num_processes=NUM_PROCESSES)
+
+
+def main():
+ print("Test basic notebook can be run")
+ test_can_initialize()
+ if is_bnb_available():
+ print("Test problematic imports (bnb)")
+ test_problematic_imports()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_ops.py b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b18780fa70fdc2b8f579f07910c8682437459d5
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_ops.py
@@ -0,0 +1,179 @@
+#!/usr/bin/env python
+
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+
+from accelerate import PartialState
+from accelerate.test_utils.testing import assert_exception
+from accelerate.utils.dataclasses import DistributedType
+from accelerate.utils.operations import (
+ DistributedOperationException,
+ broadcast,
+ copy_tensor_to_devices,
+ gather,
+ gather_object,
+ pad_across_processes,
+ reduce,
+)
+
+
+def create_tensor(state):
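+ # With two processes this yields tensor([1., 2.]) on process 0 and tensor([3., 4.]) on process 1.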
+ return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)
+
+
+def test_gather(state):
+ tensor = create_tensor(state)
+ gathered_tensor = gather(tensor)
+ assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))
+
+
+def test_gather_object(state):
+ # Gather objects in TorchXLA is not supported.
+ if state.distributed_type == DistributedType.XLA:
+ return
+ obj = [state.process_index]
+ gathered_obj = gather_object(obj)
+ assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
+ assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"
+
+
+def test_gather_non_contiguous(state):
+ # Skip this test because the 'is_contiguous' function of XLA tensor always returns True.
+ if state.distributed_type == DistributedType.XLA:
+ return
+ # Create a non-contiguous tensor
+ tensor = torch.arange(12).view(4, 3).t().to(state.device)
+ assert not tensor.is_contiguous()
+ # Shouldn't error out
+ _ = gather(tensor)
+
+
+def test_broadcast(state):
+ tensor = create_tensor(state)
+ broadcasted_tensor = broadcast(tensor)
+ assert broadcasted_tensor.shape == torch.Size([state.num_processes])
+ assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))
+
+
+def test_pad_across_processes(state):
+ # We need to pad the tensor with one more element if we are the main process
+ # to ensure that we can pad
+ if state.is_main_process:
+ tensor = torch.arange(state.num_processes + 1).to(state.device)
+ else:
+ tensor = torch.arange(state.num_processes).to(state.device)
+ padded_tensor = pad_across_processes(tensor)
+ assert padded_tensor.shape == torch.Size([state.num_processes + 1])
+ if not state.is_main_process:
+ assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]
+
+
+def test_reduce_sum(state):
+ # For now runs on only two processes
+ if state.num_processes != 2:
+ return
+ tensor = create_tensor(state)
+ reduced_tensor = reduce(tensor, "sum")
+ truth_tensor = torch.tensor([4.0, 6]).to(state.device)
+ assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"
+
+
+def test_reduce_mean(state):
+ # For now runs on only two processes
+ if state.num_processes != 2:
+ return
+ tensor = create_tensor(state)
+ reduced_tensor = reduce(tensor, "mean")
+ truth_tensor = torch.tensor([2.0, 3]).to(state.device)
+ assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"
+
+
+def test_op_checker(state):
+ # Must be in a distributed state, and gathering is currently not supported in TorchXLA.
+ if state.distributed_type in [DistributedType.NO, DistributedType.XLA]:
+ return
+ state.debug = True
+ # `pad_across_processes`
+ if state.process_index == 0:
+ data = {"tensor": torch.tensor([[0.0, 1, 2, 3, 4]]).to(state.device)}
+ else:
+ data = {"tensor": torch.tensor([[[0.0, 1, 2, 3, 4, 5]]]).to(state.device)}
+
+ with assert_exception(DistributedOperationException):
+ pad_across_processes(data, dim=0)
+
+ # `reduce`
+ if state.process_index == 0:
+ data = {"tensor": torch.tensor([[0.0, 1, 2, 3, 4]]).to(state.device)}
+ else:
+ data = {"tensor": torch.tensor([[[0.0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]]).to(state.device)}
+
+ with assert_exception(DistributedOperationException):
+ reduce(data)
+
+ # `broadcast`
+ if state.process_index == 0:
+ data = {"tensor": torch.tensor([[0.0, 1, 2, 3, 4]]).to(state.device)}
+ else:
+ data = {"tensor": torch.tensor([[[0.0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]]).to(state.device)}
+
+ with assert_exception(DistributedOperationException):
+ broadcast(data)
+
+ state.debug = False
+
+
+def test_copy_tensor_to_devices(state):
+ if state.distributed_type not in [DistributedType.MULTI_GPU, DistributedType.XLA]:
+ return
+ if state.is_main_process:
+ tensor = torch.tensor([1, 2, 3], dtype=torch.int).to(state.device)
+ else:
+ tensor = None
+ tensor = copy_tensor_to_devices(tensor)
+ assert torch.allclose(tensor, torch.tensor([1, 2, 3], dtype=torch.int, device=state.device))
+
+
+def _mp_fn(index):
+ # For xla_spawn (TPUs)
+ main()
+
+
+def main():
+ state = PartialState()
+ state.print(f"State: {state}")
+ state.print("testing gather")
+ test_gather(state)
+ state.print("testing gather_object")
+ test_gather_object(state)
+ state.print("testing gather non-contiguous")
+ test_gather_non_contiguous(state)
+ state.print("testing broadcast")
+ test_broadcast(state)
+ state.print("testing pad_across_processes")
+ test_pad_across_processes(state)
+ state.print("testing reduce_sum")
+ test_reduce_sum(state)
+ state.print("testing reduce_mean")
+ test_reduce_mean(state)
+ state.print("testing op_checker")
+ test_op_checker(state)
+ state.print("testing sending tensors across devices")
+ test_copy_tensor_to_devices(state)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_script.py b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_script.py
new file mode 100644
index 0000000000000000000000000000000000000000..a982612e4463eb807f272b7a65093ac23008861a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/accelerate/test_utils/scripts/test_script.py
@@ -0,0 +1,802 @@
+#!/usr/bin/env python
+
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import contextlib
+import io
+import math
+import time
+from copy import deepcopy
+from pathlib import Path
+
+import numpy as np
+import torch
+from torch.utils.data import DataLoader, Dataset
+
+from accelerate import Accelerator
+from accelerate.data_loader import SeedableRandomSampler, prepare_data_loader
+from accelerate.state import AcceleratorState
+from accelerate.test_utils import RegressionDataset, are_the_same_tensors
+from accelerate.utils import (
+ DataLoaderConfiguration,
+ DistributedType,
+ gather,
+ is_bf16_available,
+ is_datasets_available,
+ is_ipex_available,
+ is_mlu_available,
+ is_npu_available,
+ is_xpu_available,
+ set_seed,
+ synchronize_rng_states,
+)
+
+
+# TODO: remove RegressionModel4XPU once ccl support empty buffer in broadcasting.
+if is_xpu_available():
+ from accelerate.test_utils import RegressionModel4XPU as RegressionModel
+else:
+ from accelerate.test_utils import RegressionModel
+
+
+def generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler=False):
+ "Creates a dataloader that can also use the `SeedableRandomSampler`"
+ if use_seedable_sampler:
+ # The SeedableRandomSampler is needed during distributed setups
+ # for full reproducibility across processes with the `DataLoader`
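+ # It draws the same seeded permutation on every process and advances it via `set_epoch`,
+ # so shuffling stays reproducible across epochs and restarts.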
+ sampler = SeedableRandomSampler(
+ generator=generator,
+ data_source=train_set,
+ num_samples=len(train_set),
+ )
+ return DataLoader(train_set, batch_size=batch_size, sampler=sampler)
+ else:
+ return DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)
+
+
+def print_main(state):
+ print(f"Printing from the main process {state.process_index}")
+
+
+def print_local_main(state):
+ print(f"Printing from the local main process {state.local_process_index}")
+
+
+def print_last(state):
+ print(f"Printing from the last process {state.process_index}")
+
+
+def print_on(state, process_idx):
+ print(f"Printing from process {process_idx}: {state.process_index}")
+
+
+def process_execution_check():
+ accelerator = Accelerator()
+ num_processes = accelerator.num_processes
+ # Test main_process_first context manager
+ path = Path("check_main_process_first.txt")
+ with accelerator.main_process_first():
+ if accelerator.is_main_process:
+ time.sleep(0.1) # ensure main process takes longest
+ with open(path, "a+") as f:
+ f.write("Currently in the main process\n")
+ else:
+ with open(path, "a+") as f:
+ f.write("Now on another process\n")
+ accelerator.wait_for_everyone()
+
+ if accelerator.is_main_process:
+ with open(path) as f:
+ text = "".join(f.readlines())
+ try:
+ assert text.startswith("Currently in the main process\n"), "Main process was not first"
+ if num_processes > 1:
+ assert text.endswith("Now on another process\n"), "Non-main processes did not write after the main process"
+ assert (
+ text.count("Now on another process\n") == accelerator.num_processes - 1
+ ), f"Only wrote to file {text.count('Now on another process') + 1} times, not {accelerator.num_processes}"
+ except AssertionError:
+ path.unlink()
+ raise
+
+ if accelerator.is_main_process and path.exists():
+ path.unlink()
+ accelerator.wait_for_everyone()
+ # Test the decorators
+ f = io.StringIO()
+ with contextlib.redirect_stdout(f):
+ accelerator.on_main_process(print_main)(accelerator.state)
+ result = f.getvalue().rstrip()
+ if accelerator.is_main_process:
+ assert result == "Printing from the main process 0", f"{result} != Printing from the main process 0"
+ else:
+ assert f.getvalue().rstrip() == "", f'{result} != ""'
+ f.truncate(0)
+ f.seek(0)
+
+ with contextlib.redirect_stdout(f):
+ accelerator.on_local_main_process(print_local_main)(accelerator.state)
+ if accelerator.is_local_main_process:
+ assert f.getvalue().rstrip() == "Printing from the local main process 0"
+ else:
+ assert f.getvalue().rstrip() == ""
+ f.truncate(0)
+ f.seek(0)
+
+ with contextlib.redirect_stdout(f):
+ accelerator.on_last_process(print_last)(accelerator.state)
+ if accelerator.is_last_process:
+ assert f.getvalue().rstrip() == f"Printing from the last process {accelerator.state.num_processes - 1}"
+ else:
+ assert f.getvalue().rstrip() == ""
+ f.truncate(0)
+ f.seek(0)
+
+ for process_idx in range(num_processes):
+ with contextlib.redirect_stdout(f):
+ accelerator.on_process(print_on, process_index=process_idx)(accelerator.state, process_idx)
+ if accelerator.process_index == process_idx:
+ assert f.getvalue().rstrip() == f"Printing from process {process_idx}: {accelerator.process_index}"
+ else:
+ assert f.getvalue().rstrip() == ""
+ f.truncate(0)
+ f.seek(0)
+
+
+def init_state_check():
+ # Test we can instantiate this twice in a row.
+ state = AcceleratorState()
+ if state.local_process_index == 0:
+ print("Testing, testing. 1, 2, 3.")
+ print(state)
+
+
+def rng_sync_check():
+ state = AcceleratorState()
+ synchronize_rng_states(["torch"])
+ assert are_the_same_tensors(torch.get_rng_state()), "RNG states improperly synchronized on CPU."
+ if state.distributed_type == DistributedType.MULTI_GPU:
+ synchronize_rng_states(["cuda"])
+ assert are_the_same_tensors(torch.cuda.get_rng_state()), "RNG states improperly synchronized on GPU."
+ elif state.distributed_type == DistributedType.MULTI_XPU:
+ synchronize_rng_states(["xpu"])
+ assert are_the_same_tensors(torch.xpu.get_rng_state()), "RNG states improperly synchronized on XPU."
+ generator = torch.Generator()
+ synchronize_rng_states(["generator"], generator=generator)
+ assert are_the_same_tensors(generator.get_state()), "RNG states improperly synchronized in generator."
+
+ if state.local_process_index == 0:
+ print("All rng are properly synched.")
+
+
+def dl_preparation_check():
+ state = AcceleratorState()
+ length = 32 * state.num_processes
+
+ dl = DataLoader(range(length), batch_size=8)
+ dl = prepare_data_loader(dl, state.device, state.num_processes, state.process_index, put_on_device=True)
+ result = []
+ for batch in dl:
+ result.append(gather(batch))
+ result = torch.cat(result)
+
+ print(state.process_index, result, type(dl))
+ assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result."
+
+ dl = DataLoader(range(length), batch_size=8)
+ dl = prepare_data_loader(
+ dl,
+ state.device,
+ state.num_processes,
+ state.process_index,
+ put_on_device=True,
+ split_batches=True,
+ )
+ result = []
+ for batch in dl:
+ result.append(gather(batch))
+ result = torch.cat(result)
+ assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result."
+
+ if state.process_index == 0:
+ print("Non-shuffled dataloader passing.")
+
+ dl = DataLoader(range(length), batch_size=8, shuffle=True)
+ dl = prepare_data_loader(dl, state.device, state.num_processes, state.process_index, put_on_device=True)
+ result = []
+ for batch in dl:
+ result.append(gather(batch))
+ result = torch.cat(result).tolist()
+ result.sort()
+ assert result == list(range(length)), "Wrong shuffled dataloader result."
+
+ dl = DataLoader(range(length), batch_size=8, shuffle=True)
+ dl = prepare_data_loader(
+ dl,
+ state.device,
+ state.num_processes,
+ state.process_index,
+ put_on_device=True,
+ split_batches=True,
+ )
+ result = []
+ for batch in dl:
+ result.append(gather(batch))
+ result = torch.cat(result).tolist()
+ result.sort()
+ assert result == list(range(length)), "Wrong shuffled dataloader result."
+
+ if state.local_process_index == 0:
+ print("Shuffled dataloader passing.")
+
+
+def central_dl_preparation_check():
+ state = AcceleratorState()
+ length = 32 * state.num_processes
+
+ dl = DataLoader(range(length), batch_size=8)
+ dl = prepare_data_loader(
+ dl, state.device, state.num_processes, state.process_index, put_on_device=True, dispatch_batches=True
+ )
+ result = []
+ for batch in dl:
+ result.append(gather(batch))
+ result = torch.cat(result)
+ assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result."
+
+ dl = DataLoader(range(length), batch_size=8)
+ dl = prepare_data_loader(
+ dl,
+ state.device,
+ state.num_processes,
+ state.process_index,
+ put_on_device=True,
+ split_batches=True,
+ dispatch_batches=True,
+ )
+ result = []
+ for batch in dl:
+ result.append(gather(batch))
+ result = torch.cat(result)
+ assert torch.equal(result.cpu(), torch.arange(0, length).long()), "Wrong non-shuffled dataloader result."
+
+ if state.process_index == 0:
+ print("Non-shuffled central dataloader passing.")
+
+ dl = DataLoader(range(length), batch_size=8, shuffle=True)
+ dl = prepare_data_loader(
+ dl, state.device, state.num_processes, state.process_index, put_on_device=True, dispatch_batches=True
+ )
+ result = []
+ for batch in dl:
+ result.append(gather(batch))
+ result = torch.cat(result).tolist()
+ result.sort()
+ assert result == list(range(length)), "Wrong shuffled dataloader result."
+
+ dl = DataLoader(range(length), batch_size=8, shuffle=True)
+ dl = prepare_data_loader(
+ dl,
+ state.device,
+ state.num_processes,
+ state.process_index,
+ put_on_device=True,
+ split_batches=True,
+ dispatch_batches=True,
+ )
+ result = []
+ for batch in dl:
+ result.append(gather(batch))
+ result = torch.cat(result).tolist()
+ result.sort()
+ assert result == list(range(length)), "Wrong shuffled dataloader result."
+
+ if state.local_process_index == 0:
+ print("Shuffled central dataloader passing.")
+
+
+def custom_sampler_check():
+ state = AcceleratorState()
+
+ class CustomDataset(Dataset):
+ def __init__(self, data):
+ self.data = data
+
+ def __len__(self):
+ return len(self.data)
+
+ def __getitem__(self, index):
+ return self.data[index]
+
+ class CustomBatchSampler:
+ def __init__(self, dataset_length: int, batch_size: int, shuffle: bool = True):
+ self.batch_size = batch_size
+ self.data_index = np.arange(dataset_length)
+ self.shuffle = shuffle
+
+ def __iter__(self):
+ num_batches = len(self)
+ if self.shuffle:
+ index = np.random.permutation(self.data_index)
+ else:
+ index = self.data_index
+ output = np.array_split(index, num_batches)
+ yield from output
+
+ def __len__(self):
+ return math.ceil(len(self.data_index) / self.batch_size)
+
+ dataset = CustomDataset(range(32 * state.num_processes))
+ sampler = CustomBatchSampler(len(dataset), batch_size=8)
+ dl = DataLoader(dataset, batch_sampler=sampler)
+ dl = prepare_data_loader(dl, state.device, state.num_processes, state.process_index)
+ # We just need to ensure that `dl.batch_sampler` (or `dl.batch_sampler.batch_sampler`) is indeed the old batch sampler
+ if hasattr(dl.batch_sampler, "batch_sampler"):
+ assert isinstance(
+ dl.batch_sampler.batch_sampler, CustomBatchSampler
+ ), "Custom sampler was changed after calling `prepare_data_loader`"
+ else:
+ assert isinstance(
+ dl.batch_sampler, CustomBatchSampler
+ ), "Custom sampler was changed after calling `prepare_data_loader`"
+
+
+def check_seedable_sampler():
+ # Set seed
+ set_seed(42)
+ train_set = RegressionDataset(length=10, seed=42)
+ train_dl = DataLoader(train_set, batch_size=2, shuffle=True)
+
+ config = DataLoaderConfiguration(use_seedable_sampler=True)
+ accelerator = Accelerator(dataloader_config=config)
+ train_dl = accelerator.prepare(train_dl)
+ original_items = []
+ for _ in range(3):
+ for batch in train_dl:
+ original_items.append(batch["x"])
+ original_items = torch.cat(original_items)
+
+ # Set seed again and the epoch
+ set_seed(42)
+ train_dl.set_epoch(0)
+ new_items = []
+ for _ in range(3):
+ for batch in train_dl:
+ new_items.append(batch["x"])
+ new_items = torch.cat(new_items)
+ assert torch.allclose(original_items, new_items), "Did not obtain the same items with the same seed and epoch."
+
+
+def check_seedable_sampler_in_batch_sampler_shard():
+ set_seed(42)
+
+ config = DataLoaderConfiguration(use_seedable_sampler=True)
+ accelerator = Accelerator(dataloader_config=config)
+ assert accelerator.num_processes > 1, "This test requires more than one process."
+
+ dataloader = DataLoader(list(range(10)), batch_size=1, shuffle=True)
+ prepared_data_loader = prepare_data_loader(
+ dataloader=dataloader,
+ use_seedable_sampler=True,
+ )
+
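+ # The prepared loader wraps the original batch sampler in a `BatchSamplerShard`
+ # (this check requires more than one process), so the seedable sampler is
+ # reached via `.batch_sampler.batch_sampler.sampler`.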
+ target_sampler = prepared_data_loader.batch_sampler.batch_sampler.sampler
+ assert isinstance(
+ target_sampler, SeedableRandomSampler
+ ), "Sampler in BatchSamplerShard is not SeedableRandomSampler."
+
+
+def mock_training(length, batch_size, generator, use_seedable_sampler=False):
+ set_seed(42)
+ generator.manual_seed(42)
+ train_set = RegressionDataset(length=length, seed=42)
+
+ train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler)
+ model = RegressionModel()
+ optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
+ for epoch in range(3):
+ for batch in train_dl:
+ model.zero_grad()
+ output = model(batch["x"])
+ loss = torch.nn.functional.mse_loss(output, batch["y"])
+ loss.backward()
+ optimizer.step()
+ return train_set, model
+
+
+def training_check(use_seedable_sampler=False):
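+ # Train a baseline model without `Accelerator` (see `mock_training`), then verify
+ # that training through `accelerator.prepare` (with and without batch splitting,
+ # and under the available mixed-precision backends) yields the same weights.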
+ state = AcceleratorState()
+ generator = torch.Generator()
+ batch_size = 8
+ length = batch_size * 4 * state.num_processes
+
+ train_set, old_model = mock_training(length, batch_size * state.num_processes, generator, use_seedable_sampler)
+ assert are_the_same_tensors(old_model.a), "Did not obtain the same model on both processes."
+ assert are_the_same_tensors(old_model.b), "Did not obtain the same model on both processes."
+
+ accelerator = Accelerator()
+ train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler)
+ model = RegressionModel()
+ optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
+
+ train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)
+ set_seed(42)
+ generator.manual_seed(42)
+ for _ in range(3):
+ for batch in train_dl:
+ model.zero_grad()
+ output = model(batch["x"])
+ loss = torch.nn.functional.mse_loss(output, batch["y"])
+ accelerator.backward(loss)
+ optimizer.step()
+
+ model = accelerator.unwrap_model(model).cpu()
+ assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training."
+ assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training."
+
+ accelerator.print("Training yielded the same results on one CPU or distributed setup with no batch split.")
+
+ dataloader_config = DataLoaderConfiguration(split_batches=True, use_seedable_sampler=use_seedable_sampler)
+ accelerator = Accelerator(dataloader_config=dataloader_config)
+ train_dl = generate_baseline_dataloader(
+ train_set, generator, batch_size * state.num_processes, use_seedable_sampler
+ )
+ model = RegressionModel()
+ optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
+
+ train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)
+ set_seed(42)
+ generator.manual_seed(42)
+ for _ in range(3):
+ for batch in train_dl:
+ model.zero_grad()
+ output = model(batch["x"])
+ loss = torch.nn.functional.mse_loss(output, batch["y"])
+ accelerator.backward(loss)
+ optimizer.step()
+
+ model = accelerator.unwrap_model(model).cpu()
+ assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training."
+ assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training."
+
+ accelerator.print("Training yielded the same results on one CPU or distributes setup with batch split.")
+
+ if torch.cuda.is_available() or is_npu_available() or is_mlu_available():
+ # Mostly a test that FP16 doesn't crash as the operation inside the model is not converted to FP16
+ print("FP16 training check.")
+ AcceleratorState._reset_state()
+ dataloader_config = DataLoaderConfiguration(use_seedable_sampler=use_seedable_sampler)
+ accelerator = Accelerator(mixed_precision="fp16", dataloader_config=dataloader_config)
+ train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler)
+ model = RegressionModel()
+ optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
+
+ train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)
+ set_seed(42)
+ generator.manual_seed(42)
+ for _ in range(3):
+ for batch in train_dl:
+ model.zero_grad()
+ output = model(batch["x"])
+ loss = torch.nn.functional.mse_loss(output, batch["y"])
+ accelerator.backward(loss)
+ optimizer.step()
+
+ model = accelerator.unwrap_model(model).cpu()
+ assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training."
+ assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training."
+
+ if torch.cuda.is_available():
+ # Mostly a test that model.forward will have autocast when running unwrap_model(model, keep_fp32_wrapper=True)
+ print("Keep fp32 wrapper check.")
+ AcceleratorState._reset_state()
+ accelerator = Accelerator(mixed_precision="fp16")
+
+ model = torch.nn.Linear(2, 4)
+ model = accelerator.prepare(model)
+ model_with_fp32_wrapper = accelerator.unwrap_model(model, keep_fp32_wrapper=True)
+
+ # Run forward with fp16 as input.
+ # When the model is with mixed precision wrapper, no error will be raised.
+ input_tensor = torch.Tensor([1, 2]).to(dtype=torch.float16, device=accelerator.device)
+ output = model_with_fp32_wrapper(input_tensor)
+
+ # BF16 is only supported on CPU, TPU, and some GPUs
+ if is_bf16_available():
+ # Mostly a test that BF16 doesn't crash as the operation inside the model is not converted to BF16
+ print("BF16 training check.")
+ AcceleratorState._reset_state()
+ dataloader_config = DataLoaderConfiguration(use_seedable_sampler=use_seedable_sampler)
+ accelerator = Accelerator(mixed_precision="bf16", dataloader_config=dataloader_config)
+ train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler)
+ model = RegressionModel()
+ optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
+
+ train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)
+ set_seed(42)
+ generator.manual_seed(42)
+ for _ in range(3):
+ for batch in train_dl:
+ model.zero_grad()
+ output = model(batch["x"])
+ loss = torch.nn.functional.mse_loss(output, batch["y"])
+ accelerator.backward(loss)
+ optimizer.step()
+
+ model = accelerator.unwrap_model(model).cpu()
+ assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training."
+ assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training."
+
+ # IPEX support is only for CPU
+ if is_ipex_available():
+ print("ipex BF16 training check.")
+ AcceleratorState._reset_state()
+ dataloader_config = DataLoaderConfiguration(use_seedable_sampler=use_seedable_sampler)
+ accelerator = Accelerator(mixed_precision="bf16", cpu=True, dataloader_config=dataloader_config)
+ train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler)
+ model = RegressionModel()
+ optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
+
+ train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)
+ set_seed(42)
+ generator.manual_seed(42)
+ for _ in range(3):
+ for batch in train_dl:
+ model.zero_grad()
+ output = model(batch["x"])
+ loss = torch.nn.functional.mse_loss(output, batch["y"])
+ accelerator.backward(loss)
+ optimizer.step()
+
+ model = accelerator.unwrap_model(model).cpu()
+ assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on CPU or distributed training."
+ assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on CPU or distributed training."
+
+ # BF16 on XPU is only checked when an XPU device is available
+ if is_xpu_available():
+ print("xpu BF16 training check.")
+ AcceleratorState._reset_state()
+ dataloader_config = DataLoaderConfiguration(use_seedable_sampler=use_seedable_sampler)
+ accelerator = Accelerator(mixed_precision="bf16", cpu=False, dataloader_config=dataloader_config)
+ train_dl = generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler)
+ model = RegressionModel()
+ optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
+
+ train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer)
+ set_seed(42)
+ generator.manual_seed(42)
+ for _ in range(3):
+ for batch in train_dl:
+ model.zero_grad()
+ output = model(batch["x"])
+ loss = torch.nn.functional.mse_loss(output, batch["y"])
+ accelerator.backward(loss)
+ optimizer.step()
+
+ model = accelerator.unwrap_model(model).cpu()
+ assert torch.allclose(old_model.a, model.a), "Did not obtain the same model on XPU or distributed training."
+ assert torch.allclose(old_model.b, model.b), "Did not obtain the same model on XPU or distributed training."
+
+
+def test_split_between_processes_dataset(datasets_Dataset):
+ state = AcceleratorState()
+ data = datasets_Dataset.from_list([dict(k=v) for v in range(2 * state.num_processes)])
+ with state.split_between_processes(data, apply_padding=False) as results:
+ assert (
+ len(results) == 2
+ ), f"Each process did not have two items. Process index: {state.process_index}; Length: {len(results)}"
+
+ data = datasets_Dataset.from_list([dict(k=v) for v in range(2 * state.num_processes - 1)])
+ with state.split_between_processes(data, apply_padding=False) as results:
+ if state.is_last_process:
+ assert (
+ len(results) == 1
+ ), f"Last process did not receive a single item. Process index: {state.process_index}; Length: {len(results)}"
+ else:
+ assert (
+ len(results) == 2
+ ), f"One of the intermediate processes did not receive two items. Process index: {state.process_index}; Length: {len(results)}"
+
+ data = datasets_Dataset.from_list([dict(k=v) for v in range(2 * state.num_processes - 1)])
+ with state.split_between_processes(data, apply_padding=True) as results:
+ if state.num_processes == 1:
+ assert (
+ len(results) == 1
+ ), f"Single process did not receive a single item. Process index: {state.process_index}; Length: {len(results)}"
+ else:
+ assert (
+ len(results) == 2
+ ), f"Each process did not have two items. Process index: {state.process_index}; Length: {len(results)}"
+
+ state.wait_for_everyone()
+
+
+def test_split_between_processes_list():
+ state = AcceleratorState()
+ data = list(range(0, 2 * state.num_processes))
+ with state.split_between_processes(data) as results:
+ assert (
+ len(results) == 2
+ ), f"Each process did not have two items. Process index: {state.process_index}; Length: {len(results)}"
+
+ data = list(range(0, (3 * state.num_processes) - 1))
+ with state.split_between_processes(data, apply_padding=True) as results:
+ if state.is_last_process:
+ # Test that the last process gets the extra item(s)
+ num_samples_per_device = math.ceil(len(data) / state.num_processes)
+ assert (
+ len(results) == num_samples_per_device
+ ), f"Last process did not get the extra item(s). Process index: {state.process_index}; Length: {len(results)}"
+ state.wait_for_everyone()
+
+
+def test_split_between_processes_nested_dict():
+ state = AcceleratorState()
+ a = [1, 2, 3, 4, 5, 6, 7, 8]
+ b = ["a", "b", "c", "d", "e", "f", "g", "h"]
+ c = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8])
+ if state.num_processes in (1, 2, 4):
+ data = {"a": a, "b": b, "c": c}
+ data_copy = deepcopy(data)
+ with state.split_between_processes(data) as results:
+ if state.process_index == 0:
+ assert results["a"] == data_copy["a"][: 8 // state.num_processes]
+ elif state.num_processes == 2:
+ assert results["a"] == data_copy["a"][4:]
+ elif state.process_index == 3:
+ # We return a list each time
+ assert results["a"] == data_copy["a"][-2:], f'Expected: {data_copy["a"][-2]}, Actual: {results["a"]}'
+ if state.process_index == 0:
+ assert results["b"] == data_copy["b"][: 8 // state.num_processes]
+ elif state.num_processes == 2:
+ assert results["b"] == data_copy["b"][4:]
+ elif state.process_index == 3:
+ assert results["b"] == data_copy["b"][-2:]
+ if state.process_index == 0:
+ assert torch.allclose(
+ results["c"], data_copy["c"][: 8 // state.num_processes]
+ ), f"Did not obtain expected values on process 0, expected `{data['c'][:8 // state.num_processes]}`, received: {results['c']}"
+ elif state.num_processes == 2:
+ assert torch.allclose(
+ results["c"], data_copy["c"][4:]
+ ), f"Did not obtain expected values on process 2, expected `{data['c'][4:]}`, received: {results['c']}"
+ elif state.process_index == 3:
+ assert torch.allclose(
+ results["c"], data_copy["c"][-2:]
+ ), f"Did not obtain expected values on process 4, expected `{data['c'][-2:]}`, received: {results['c']}"
+
+ state.wait_for_everyone()
+
+
+def test_split_between_processes_tensor():
+ state = AcceleratorState()
+ if state.num_processes > 1:
+ data = torch.tensor([[0, 1, 2, 3], [4, 5, 6, 7]]).to(state.device)
+ with state.split_between_processes(data) as results:
+ if state.process_index == 0:
+ assert torch.allclose(results, torch.tensor([0, 1, 2, 3]).to(state.device))
+ else:
+ assert torch.allclose(results, torch.tensor([4, 5, 6, 7]).to(state.device))
+ state.wait_for_everyone()
+
+
+def test_trigger():
+ accelerator = Accelerator()
+ # should start out False
+ assert accelerator.check_trigger() is False
+
+ # set a breakpoint on the main process
+ if accelerator.is_main_process:
+ accelerator.set_trigger()
+
+ # check it's been activated across all processes
+ # calls `all_reduce` and triggers a sync
+ assert accelerator.check_trigger() is True
+
+ # check it's been reset after the sync
+ assert accelerator.check_trigger() is False
+
+
+def test_reinstantiated_state():
+ import pytest
+
+ AcceleratorState._reset_state()
+ simple_model = torch.nn.Linear(1, 1)
+ # First define an accelerator
+ accelerator = Accelerator()
+ # Then call `reset_state`, breaking the state existing in the accelerator
+ AcceleratorState._reset_state()
+ # Now try and prepare a simple model, should raise the custom error early
+ with pytest.raises(AttributeError) as cm:
+ accelerator.prepare(simple_model)
+ assert "`AcceleratorState` object has no attribute" in str(cm.value.args[0])
+ assert "This happens if `AcceleratorState._reset_state()`" in str(cm.value.args[0])
+
+
+def main():
+ accelerator = Accelerator()
+ state = accelerator.state
+ if state.local_process_index == 0:
+ print("**Initialization**")
+ init_state_check()
+ state.wait_for_everyone()
+
+ if state.distributed_type == DistributedType.MULTI_GPU:
+ num_processes_per_node = torch.cuda.device_count()
+ else:
+ num_processes_per_node = state.num_processes
+
+ # We only run these tests in non-multinode setups
+ if num_processes_per_node == state.num_processes:
+ if state.process_index == 0:
+ print("\n**Test process execution**")
+ process_execution_check()
+
+ if state.process_index == 0:
+ print("\n**Test split between processes as a list**")
+ test_split_between_processes_list()
+
+ if state.process_index == 0:
+ print("\n**Test split between processes as a dict**")
+ test_split_between_processes_nested_dict()
+
+ if state.process_index == 0:
+ print("\n**Test split between processes as a tensor**")
+ test_split_between_processes_tensor()
+
+ if state.process_index == 0:
+ print("\n**Test split between processes as a datasets.Dataset**")
+ if is_datasets_available():
+ from datasets import Dataset as datasets_Dataset
+
+ test_split_between_processes_dataset(datasets_Dataset)
+ else:
+ print("Skipped because Hugging Face datasets is not available")
+
+ if state.local_process_index == 0:
+ print("\n**Test random number generator synchronization**")
+ rng_sync_check()
+
+ if state.local_process_index == 0:
+ print("\n**DataLoader integration test**")
+ dl_preparation_check()
+ if state.distributed_type != DistributedType.XLA:
+ central_dl_preparation_check()
+ custom_sampler_check()
+ check_seedable_sampler()
+
+ if state.num_processes > 1:
+ check_seedable_sampler_in_batch_sampler_shard()
+
+ # Training is not exactly the same in DeepSpeed and CPU mode
+ if state.distributed_type == DistributedType.DEEPSPEED:
+ return
+
+ if state.local_process_index == 0:
+ print("\n**Training integration test**")
+ training_check(use_seedable_sampler=False)
+ training_check(use_seedable_sampler=True)
+
+ if state.local_process_index == 0:
+ print("\n**Breakpoint trigger test**")
+ test_trigger()
+
+ if state.local_process_index == 0:
+ print("\n**Test reinstantiated state**")
+ test_reinstantiated_state()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/env-llmeval/lib/python3.10/site-packages/distutils-precedence.pth b/env-llmeval/lib/python3.10/site-packages/distutils-precedence.pth
new file mode 100644
index 0000000000000000000000000000000000000000..10c404f6ad452c148c46a39e11ddd4bc58530d16
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/distutils-precedence.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ea7ffef3fe2a117ee12c68ed6553617f0d7fd2f0590257c25c484959a3b7373
+size 152
diff --git a/env-llmeval/lib/python3.10/site-packages/threadpoolctl.py b/env-llmeval/lib/python3.10/site-packages/threadpoolctl.py
new file mode 100644
index 0000000000000000000000000000000000000000..36fec13daee29a62e728b969b21561cd70443e13
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/threadpoolctl.py
@@ -0,0 +1,1289 @@
+"""threadpoolctl
+
+This module provides utilities to introspect native libraries that rely on
+thread pools (notably BLAS and OpenMP implementations) and dynamically set the
+maximal number of threads they can use.
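+
+A minimal usage sketch (illustrative only; the full parameters are documented on
+the helpers defined below):
+
+ from threadpoolctl import threadpool_info, threadpool_limits
+
+ print(threadpool_info()) # one info dict per detected library
+ with threadpool_limits(limits=1, user_api="blas"):
+ ... # BLAS calls in this block are limited to a single thread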
+"""
+
+# License: BSD 3-Clause
+
+# The code to introspect dynamically loaded libraries on POSIX systems is
+# adapted from code by Intel developer @anton-malakhov available at
+# https://github.com/IntelPython/smp (Copyright (c) 2017, Intel Corporation)
+# and also published under the BSD 3-Clause license
+import os
+import re
+import sys
+import ctypes
+import textwrap
+from typing import final
+import warnings
+from ctypes.util import find_library
+from abc import ABC, abstractmethod
+from functools import lru_cache
+from contextlib import ContextDecorator
+
+__version__ = "3.4.0"
+__all__ = [
+ "threadpool_limits",
+ "threadpool_info",
+ "ThreadpoolController",
+ "LibController",
+ "register",
+]
+
+
+# One can get runtime errors or even segfaults due to multiple OpenMP libraries
+# loaded simultaneously which can happen easily in Python when importing and
+# using compiled extensions built with different compilers and therefore
+# different OpenMP runtimes in the same program. In particular libiomp (used by
+# Intel ICC) and libomp used by clang/llvm tend to crash. This can happen for
+# instance when calling BLAS inside a prange. Setting the following environment
+# variable allows multiple OpenMP libraries to be loaded. It should not degrade
+# performance since we manually take care of potential over-subscription
+# performance issues, in sections of the code where nested OpenMP loops can
+# happen, by dynamically reconfiguring the inner OpenMP runtime to temporarily
+# disable it while under the scope of the outer OpenMP parallel section.
+os.environ.setdefault("KMP_DUPLICATE_LIB_OK", "True")
+
+# Structure used to cast the info about dynamically loaded libraries. See
+# https://linux.die.net/man/3/dl_iterate_phdr for more details.
+_SYSTEM_UINT = ctypes.c_uint64 if sys.maxsize > 2**32 else ctypes.c_uint32
+_SYSTEM_UINT_HALF = ctypes.c_uint32 if sys.maxsize > 2**32 else ctypes.c_uint16
+
+
+class _dl_phdr_info(ctypes.Structure):
+ _fields_ = [
+ ("dlpi_addr", _SYSTEM_UINT), # Base address of object
+ ("dlpi_name", ctypes.c_char_p), # path to the library
+ ("dlpi_phdr", ctypes.c_void_p), # pointer on dlpi_headers
+ ("dlpi_phnum", _SYSTEM_UINT_HALF), # number of elements in dlpi_phdr
+ ]
+
+
+# The RTLD_NOLOAD flag for loading shared libraries is not defined on Windows.
+try:
+ _RTLD_NOLOAD = os.RTLD_NOLOAD
+except AttributeError:
+ _RTLD_NOLOAD = ctypes.DEFAULT_MODE
+
+
+class LibController(ABC):
+ """Abstract base class for the individual library controllers
+
+ A library controller must expose the following class attributes:
+ - user_api : str
+ Usually the name of the library or generic specification the library
+ implements, e.g. "blas" is a specification with different implementations.
+ - internal_api : str
+ Usually the name of the library or concrete implementation of some
+ specification, e.g. "openblas" is an implementation of the "blas"
+ specification.
+ - filename_prefixes : tuple
+ Possible prefixes of the shared library's filename that allow the
+ library to be identified, e.g. "libopenblas" for libopenblas.so.
+
+ and implement the following methods: `get_num_threads`, `set_num_threads` and
+ `get_version`.
+
+ Threadpoolctl loops through all the loaded shared libraries and tries to match
+ the filename of each library with the `filename_prefixes`. If a match is found, a
+ controller is instantiated and a handle to the library is stored in the `dynlib`
+ attribute as a `ctypes.CDLL` object. It can be used to access the necessary symbols
+ of the shared library to implement the above methods.
+
+ The following information will be exposed in the info dictionary:
+ - user_api : standardized API, if any, or a copy of internal_api.
+ - internal_api : implementation-specific API.
+ - num_threads : the current thread limit.
+ - prefix : prefix of the shared library's filename.
+ - filepath : path to the loaded shared library.
+ - version : version of the library (if available).
+
+ In addition, each library controller may expose internal API specific entries. They
+ must be set as attributes in the `set_additional_attributes` method.
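+
+ A minimal controller sketch (illustrative; "libfoo" and the "foo_*" symbols are
+ hypothetical):
+
+ class FooController(LibController):
+ user_api = "foo"
+ internal_api = "foo"
+ filename_prefixes = ("libfoo",)
+
+ def get_num_threads(self):
+ return getattr(self.dynlib, "foo_get_num_threads", lambda: None)()
+
+ def set_num_threads(self, num_threads):
+ getattr(self.dynlib, "foo_set_num_threads", lambda n: None)(num_threads)
+
+ def get_version(self):
+ return None
+
+ register(FooController)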
+ """
+
+ @final
+ def __init__(self, *, filepath=None, prefix=None, parent=None):
+ """This is not meant to be overriden by subclasses."""
+ self.parent = parent
+ self.prefix = prefix
+ self.filepath = filepath
+ self.dynlib = ctypes.CDLL(filepath, mode=_RTLD_NOLOAD)
+ self.version = self.get_version()
+ self.set_additional_attributes()
+
+ def info(self):
+ """Return relevant info wrapped in a dict"""
+ exposed_attrs = {
+ "user_api": self.user_api,
+ "internal_api": self.internal_api,
+ "num_threads": self.num_threads,
+ **vars(self),
+ }
+ exposed_attrs.pop("dynlib")
+ exposed_attrs.pop("parent")
+ return exposed_attrs
+
+ def set_additional_attributes(self):
+ """Set additional attributes meant to be exposed in the info dict"""
+
+ @property
+ def num_threads(self):
+ """Exposes the current thread limit as a dynamic property
+
+ This is not meant to be used or overridden by subclasses.
+ """
+ return self.get_num_threads()
+
+ @abstractmethod
+ def get_num_threads(self):
+ """Return the maximum number of threads available to use"""
+
+ @abstractmethod
+ def set_num_threads(self, num_threads):
+ """Set the maximum number of threads to use"""
+
+ @abstractmethod
+ def get_version(self):
+ """Return the version of the shared library"""
+
+
+class OpenBLASController(LibController):
+ """Controller class for OpenBLAS"""
+
+ user_api = "blas"
+ internal_api = "openblas"
+ filename_prefixes = ("libopenblas", "libblas")
+ check_symbols = (
+ "openblas_get_num_threads",
+ "openblas_get_num_threads64_",
+ "openblas_set_num_threads",
+ "openblas_set_num_threads64_",
+ "openblas_get_config",
+ "openblas_get_config64_",
+ "openblas_get_parallel",
+ "openblas_get_parallel64_",
+ "openblas_get_corename",
+ "openblas_get_corename64_",
+ )
+
+ def set_additional_attributes(self):
+ self.threading_layer = self._get_threading_layer()
+ self.architecture = self._get_architecture()
+
+ def get_num_threads(self):
+ get_func = getattr(
+ self.dynlib,
+ "openblas_get_num_threads",
+ # Symbols differ when built for 64bit integers in Fortran
+ getattr(self.dynlib, "openblas_get_num_threads64_", lambda: None),
+ )
+
+ return get_func()
+
+ def set_num_threads(self, num_threads):
+ set_func = getattr(
+ self.dynlib,
+ "openblas_set_num_threads",
+ # Symbols differ when built for 64bit integers in Fortran
+ getattr(
+ self.dynlib, "openblas_set_num_threads64_", lambda num_threads: None
+ ),
+ )
+ return set_func(num_threads)
+
+ def get_version(self):
+ # None means OpenBLAS is not loaded or version < 0.3.4, since OpenBLAS
+ # did not expose its version before that.
+ get_config = getattr(
+ self.dynlib,
+ "openblas_get_config",
+ getattr(self.dynlib, "openblas_get_config64_", None),
+ )
+ if get_config is None:
+ return None
+
+ get_config.restype = ctypes.c_char_p
+ config = get_config().split()
+ if config[0] == b"OpenBLAS":
+ return config[1].decode("utf-8")
+ return None
+
+ def _get_threading_layer(self):
+ """Return the threading layer of OpenBLAS"""
+ openblas_get_parallel = getattr(
+ self.dynlib,
+ "openblas_get_parallel",
+ getattr(self.dynlib, "openblas_get_parallel64_", None),
+ )
+ if openblas_get_parallel is None:
+ return "unknown"
+ threading_layer = openblas_get_parallel()
+ if threading_layer == 2:
+ return "openmp"
+ elif threading_layer == 1:
+ return "pthreads"
+ return "disabled"
+
+ def _get_architecture(self):
+ """Return the architecture detected by OpenBLAS"""
+ get_corename = getattr(
+ self.dynlib,
+ "openblas_get_corename",
+ getattr(self.dynlib, "openblas_get_corename64_", None),
+ )
+ if get_corename is None:
+ return None
+
+ get_corename.restype = ctypes.c_char_p
+ return get_corename().decode("utf-8")
+
+
+class BLISController(LibController):
+ """Controller class for BLIS"""
+
+ user_api = "blas"
+ internal_api = "blis"
+ filename_prefixes = ("libblis", "libblas")
+ check_symbols = (
+ "bli_thread_get_num_threads",
+ "bli_thread_set_num_threads",
+ "bli_info_get_version_str",
+ "bli_info_get_enable_openmp",
+ "bli_info_get_enable_pthreads",
+ "bli_arch_query_id",
+ "bli_arch_string",
+ )
+
+ def set_additional_attributes(self):
+ self.threading_layer = self._get_threading_layer()
+ self.architecture = self._get_architecture()
+
+ def get_num_threads(self):
+ get_func = getattr(self.dynlib, "bli_thread_get_num_threads", lambda: None)
+ num_threads = get_func()
+ # by default BLIS is single-threaded and get_num_threads
+ # returns -1. We map it to 1 for consistency with other libraries.
+ return 1 if num_threads == -1 else num_threads
+
+ def set_num_threads(self, num_threads):
+ set_func = getattr(
+ self.dynlib, "bli_thread_set_num_threads", lambda num_threads: None
+ )
+ return set_func(num_threads)
+
+ def get_version(self):
+ get_version_ = getattr(self.dynlib, "bli_info_get_version_str", None)
+ if get_version_ is None:
+ return None
+
+ get_version_.restype = ctypes.c_char_p
+ return get_version_().decode("utf-8")
+
+ def _get_threading_layer(self):
+ """Return the threading layer of BLIS"""
+ if getattr(self.dynlib, "bli_info_get_enable_openmp", lambda: False)():
+ return "openmp"
+ elif getattr(self.dynlib, "bli_info_get_enable_pthreads", lambda: False)():
+ return "pthreads"
+ return "disabled"
+
+ def _get_architecture(self):
+ """Return the architecture detected by BLIS"""
+ bli_arch_query_id = getattr(self.dynlib, "bli_arch_query_id", None)
+ bli_arch_string = getattr(self.dynlib, "bli_arch_string", None)
+ if bli_arch_query_id is None or bli_arch_string is None:
+ return None
+
+ # the true restype should be BLIS' arch_t (enum) but int should work
+ # for us:
+ bli_arch_query_id.restype = ctypes.c_int
+ bli_arch_string.restype = ctypes.c_char_p
+ return bli_arch_string(bli_arch_query_id()).decode("utf-8")
+
+
+class FlexiBLASController(LibController):
+ """Controller class for FlexiBLAS"""
+
+ user_api = "blas"
+ internal_api = "flexiblas"
+ filename_prefixes = ("libflexiblas",)
+ check_symbols = (
+ "flexiblas_get_num_threads",
+ "flexiblas_set_num_threads",
+ "flexiblas_get_version",
+ "flexiblas_list",
+ "flexiblas_list_loaded",
+ "flexiblas_current_backend",
+ )
+
+ @property
+ def loaded_backends(self):
+ return self._get_backend_list(loaded=True)
+
+ @property
+ def current_backend(self):
+ return self._get_current_backend()
+
+ def info(self):
+ """Return relevant info wrapped in a dict"""
+ # We override the info method because the loaded and current backends
+ # are dynamic properties
+ exposed_attrs = super().info()
+ exposed_attrs["loaded_backends"] = self.loaded_backends
+ exposed_attrs["current_backend"] = self.current_backend
+
+ return exposed_attrs
+
+ def set_additional_attributes(self):
+ self.available_backends = self._get_backend_list(loaded=False)
+
+ def get_num_threads(self):
+ get_func = getattr(self.dynlib, "flexiblas_get_num_threads", lambda: None)
+ num_threads = get_func()
+ # A return value of -1 means no explicit limit is set (the single-threaded
+ # default). We map it to 1 for consistency with other libraries.
+ return 1 if num_threads == -1 else num_threads
+
+ def set_num_threads(self, num_threads):
+ set_func = getattr(
+ self.dynlib, "flexiblas_set_num_threads", lambda num_threads: None
+ )
+ return set_func(num_threads)
+
+ def get_version(self):
+ get_version_ = getattr(self.dynlib, "flexiblas_get_version", None)
+ if get_version_ is None:
+ return None
+
+ major = ctypes.c_int()
+ minor = ctypes.c_int()
+ patch = ctypes.c_int()
+ get_version_(ctypes.byref(major), ctypes.byref(minor), ctypes.byref(patch))
+ return f"{major.value}.{minor.value}.{patch.value}"
+
+ def _get_backend_list(self, loaded=False):
+ """Return the list of available backends for FlexiBLAS.
+
+ If loaded is False, return the list of available backends from the FlexiBLAS
+ configuration. If loaded is True, return the list of actually loaded backends.
+ """
+ func_name = f"flexiblas_list{'_loaded' if loaded else ''}"
+ get_backend_list_ = getattr(self.dynlib, func_name, None)
+ if get_backend_list_ is None:
+ return None
+
+ n_backends = get_backend_list_(None, 0, 0)
+
+ backends = []
+ for i in range(n_backends):
+ backend_name = ctypes.create_string_buffer(1024)
+ get_backend_list_(backend_name, 1024, i)
+ if backend_name.value.decode("utf-8") != "__FALLBACK__":
+ # We don't know when to expect __FALLBACK__ but it is not a real
+ # backend and does not show up when running flexiblas list.
+ backends.append(backend_name.value.decode("utf-8"))
+ return backends
+
+ def _get_current_backend(self):
+ """Return the backend of FlexiBLAS"""
+ get_backend_ = getattr(self.dynlib, "flexiblas_current_backend", None)
+ if get_backend_ is None:
+ return None
+
+ backend = ctypes.create_string_buffer(1024)
+ get_backend_(backend, ctypes.sizeof(backend))
+ return backend.value.decode("utf-8")
+
+ def switch_backend(self, backend):
+ """Switch the backend of FlexiBLAS
+
+ Parameters
+ ----------
+ backend : str
+ The name or the path to the shared library of the backend to switch to. If
+ the backend is not already loaded, it will be loaded first.
+ """
+ if backend not in self.loaded_backends:
+ if backend in self.available_backends:
+ load_func = getattr(self.dynlib, "flexiblas_load_backend", lambda _: -1)
+ else: # assume backend is a path to a shared library
+ load_func = getattr(
+ self.dynlib, "flexiblas_load_backend_library", lambda _: -1
+ )
+ res = load_func(str(backend).encode("utf-8"))
+ if res == -1:
+ raise RuntimeError(
+ f"Failed to load backend {backend!r}. It must either be the name of"
+ " a backend available in the FlexiBLAS configuration "
+ f"{self.available_backends} or the path to a valid shared library."
+ )
+
+ # Trigger a new search of loaded shared libraries since loading a new
+ # backend caused a dlopen.
+ self.parent._load_libraries()
+
+ switch_func = getattr(self.dynlib, "flexiblas_switch", lambda _: -1)
+ idx = self.loaded_backends.index(backend)
+ res = switch_func(idx)
+ if res == -1:
+ raise RuntimeError(f"Failed to switch to backend {backend!r}.")
+
+
+class MKLController(LibController):
+ """Controller class for MKL"""
+
+ user_api = "blas"
+ internal_api = "mkl"
+ filename_prefixes = ("libmkl_rt", "mkl_rt", "libblas")
+ check_symbols = (
+ "MKL_Get_Max_Threads",
+ "MKL_Set_Num_Threads",
+ "MKL_Get_Version_String",
+ "MKL_Set_Threading_Layer",
+ )
+
+ def set_additional_attributes(self):
+ self.threading_layer = self._get_threading_layer()
+
+ def get_num_threads(self):
+ get_func = getattr(self.dynlib, "MKL_Get_Max_Threads", lambda: None)
+ return get_func()
+
+ def set_num_threads(self, num_threads):
+ set_func = getattr(self.dynlib, "MKL_Set_Num_Threads", lambda num_threads: None)
+ return set_func(num_threads)
+
+ def get_version(self):
+ if not hasattr(self.dynlib, "MKL_Get_Version_String"):
+ return None
+
+ res = ctypes.create_string_buffer(200)
+ self.dynlib.MKL_Get_Version_String(res, 200)
+
+ version = res.value.decode("utf-8")
+ group = re.search(r"Version ([^ ]+) ", version)
+ if group is not None:
+ version = group.groups()[0]
+ return version.strip()
+
+ def _get_threading_layer(self):
+ """Return the threading layer of MKL"""
+ # The function mkl_set_threading_layer returns the current threading
+ # layer. Calling it with an invalid threading layer allows us to safely
+ # get the threading layer
+ set_threading_layer = getattr(
+ self.dynlib, "MKL_Set_Threading_Layer", lambda layer: -1
+ )
+ layer_map = {
+ 0: "intel",
+ 1: "sequential",
+ 2: "pgi",
+ 3: "gnu",
+ 4: "tbb",
+ -1: "not specified",
+ }
+ return layer_map[set_threading_layer(-1)]
+
+
+class OpenMPController(LibController):
+ """Controller class for OpenMP"""
+
+ user_api = "openmp"
+ internal_api = "openmp"
+ filename_prefixes = ("libiomp", "libgomp", "libomp", "vcomp")
+ check_symbols = (
+ "omp_get_max_threads",
+ "omp_get_num_threads",
+ )
+
+ def get_num_threads(self):
+ get_func = getattr(self.dynlib, "omp_get_max_threads", lambda: None)
+ return get_func()
+
+ def set_num_threads(self, num_threads):
+ set_func = getattr(self.dynlib, "omp_set_num_threads", lambda num_threads: None)
+ return set_func(num_threads)
+
+ def get_version(self):
+ # There is no way to get the version number programmatically in OpenMP.
+ return None
+
+
+# Controllers for the libraries that we'll look for in the loaded libraries.
+# Third party libraries can register their own controllers.
+_ALL_CONTROLLERS = [
+ OpenBLASController,
+ BLISController,
+ MKLController,
+ OpenMPController,
+ FlexiBLASController,
+]
+
+# Helpers for the doc and test names
+_ALL_USER_APIS = list(set(lib.user_api for lib in _ALL_CONTROLLERS))
+_ALL_INTERNAL_APIS = [lib.internal_api for lib in _ALL_CONTROLLERS]
+_ALL_PREFIXES = list(
+ set(prefix for lib in _ALL_CONTROLLERS for prefix in lib.filename_prefixes)
+)
+_ALL_BLAS_LIBRARIES = [
+ lib.internal_api for lib in _ALL_CONTROLLERS if lib.user_api == "blas"
+]
+_ALL_OPENMP_LIBRARIES = OpenMPController.filename_prefixes
+
+
+def register(controller):
+ """Register a new controller"""
+ _ALL_CONTROLLERS.append(controller)
+ _ALL_USER_APIS.append(controller.user_api)
+ _ALL_INTERNAL_APIS.append(controller.internal_api)
+ _ALL_PREFIXES.extend(controller.filename_prefixes)
+
+
+def _format_docstring(*args, **kwargs):
+ def decorator(o):
+ if o.__doc__ is not None:
+ o.__doc__ = o.__doc__.format(*args, **kwargs)
+ return o
+
+ return decorator
+
+
+@lru_cache(maxsize=10000)
+def _realpath(filepath):
+ """Small caching wrapper around os.path.realpath to limit system calls"""
+ return os.path.realpath(filepath)
+
+
+@_format_docstring(USER_APIS=list(_ALL_USER_APIS), INTERNAL_APIS=_ALL_INTERNAL_APIS)
+def threadpool_info():
+ """Return the maximal number of threads for each detected library.
+
+ Return a list with all the supported libraries that have been found. Each
+ library is represented by a dict with the following information:
+
+ - "user_api" : user API. Possible values are {USER_APIS}.
+ - "internal_api": internal API. Possible values are {INTERNAL_APIS}.
+ - "prefix" : filename prefix of the specific implementation.
+ - "filepath": path to the loaded library.
+ - "version": version of the library (if available).
+ - "num_threads": the current thread limit.
+
+ In addition, each library may contain internal_api specific entries.
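+
+ A short usage sketch (illustrative):
+
+ import threadpoolctl
+
+ for lib_info in threadpoolctl.threadpool_info():
+ print(lib_info["internal_api"], lib_info["num_threads"])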
+ """
+ return ThreadpoolController().info()
+
+
+class _ThreadpoolLimiter:
+ """The guts of ThreadpoolController.limit
+
+ Refer to the docstring of ThreadpoolController.limit for more details.
+
+ It will only act on the library controllers held by the provided `controller`.
+ Using the default constructor sets the limits right away such that it can be used as
+ a callable. Setting the limits can be delayed by using the `wrap` class method such
+ that it can be used as a decorator.
+ """
+
+ def __init__(self, controller, *, limits=None, user_api=None):
+ self._controller = controller
+ self._limits, self._user_api, self._prefixes = self._check_params(
+ limits, user_api
+ )
+ self._original_info = self._controller.info()
+ self._set_threadpool_limits()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.restore_original_limits()
+
+ @classmethod
+ def wrap(cls, controller, *, limits=None, user_api=None):
+ """Return an instance of this class that can be used as a decorator"""
+ return _ThreadpoolLimiterDecorator(
+ controller=controller, limits=limits, user_api=user_api
+ )
+
+ def restore_original_limits(self):
+ """Set the limits back to their original values"""
+ for lib_controller, original_info in zip(
+ self._controller.lib_controllers, self._original_info
+ ):
+ lib_controller.set_num_threads(original_info["num_threads"])
+
+ # Alias of `restore_original_limits` for backward compatibility
+ unregister = restore_original_limits
+
+ def get_original_num_threads(self):
+ """Original num_threads from before calling threadpool_limits
+
+ Return a dict `{user_api: num_threads}`.
+ """
+ num_threads = {}
+ warning_apis = []
+
+ for user_api in self._user_api:
+ limits = [
+ lib_info["num_threads"]
+ for lib_info in self._original_info
+ if lib_info["user_api"] == user_api
+ ]
+ limits = set(limits)
+ n_limits = len(limits)
+
+ if n_limits == 1:
+ limit = limits.pop()
+ elif n_limits == 0:
+ limit = None
+ else:
+ limit = min(limits)
+ warning_apis.append(user_api)
+
+ num_threads[user_api] = limit
+
+ if warning_apis:
+ warnings.warn(
+ "Multiple value possible for following user apis: "
+ + ", ".join(warning_apis)
+ + ". Returning the minimum."
+ )
+
+ return num_threads
+
+ def _check_params(self, limits, user_api):
+ """Suitable values for the _limits, _user_api and _prefixes attributes"""
+
+ if isinstance(limits, str) and limits == "sequential_blas_under_openmp":
+ (
+ limits,
+ user_api,
+ ) = self._controller._get_params_for_sequential_blas_under_openmp().values()
+
+ if limits is None or isinstance(limits, int):
+ if user_api is None:
+ user_api = _ALL_USER_APIS
+ elif user_api in _ALL_USER_APIS:
+ user_api = [user_api]
+ else:
+ raise ValueError(
+ f"user_api must be either in {_ALL_USER_APIS} or None. Got "
+ f"{user_api} instead."
+ )
+
+ if limits is not None:
+ limits = {api: limits for api in user_api}
+ prefixes = []
+ else:
+ if isinstance(limits, list):
+ # This should be a list of dicts of library info, for
+ # compatibility with the result from threadpool_info.
+ limits = {
+ lib_info["prefix"]: lib_info["num_threads"] for lib_info in limits
+ }
+ elif isinstance(limits, ThreadpoolController):
+ # To set the limits from the library controllers of a
+ # ThreadpoolController object.
+ limits = {
+ lib_controller.prefix: lib_controller.num_threads
+ for lib_controller in limits.lib_controllers
+ }
+
+ if not isinstance(limits, dict):
+ raise TypeError(
+ "limits must either be an int, a list, a dict, or "
+ f"'sequential_blas_under_openmp'. Got {type(limits)} instead"
+ )
+
+ # With a dictionary, both a specific limit for given libraries and a
+ # global limit per user_api can be set. Fetch each separately.
+ prefixes = [prefix for prefix in limits if prefix in _ALL_PREFIXES]
+ user_api = [api for api in limits if api in _ALL_USER_APIS]
+
+ return limits, user_api, prefixes
+
+ def _set_threadpool_limits(self):
+ """Change the maximal number of threads in selected thread pools.
+
+ Return a list with all the supported libraries that have been found
+ matching `self._prefixes` and `self._user_api`.
+ """
+ if self._limits is None:
+ return
+
+ for lib_controller in self._controller.lib_controllers:
+ # self._limits is a dict {key: num_threads} where key is either
+ # a prefix or a user_api. If a library matches both, the limit
+ # corresponding to the prefix is chosen.
+ if lib_controller.prefix in self._limits:
+ num_threads = self._limits[lib_controller.prefix]
+ elif lib_controller.user_api in self._limits:
+ num_threads = self._limits[lib_controller.user_api]
+ else:
+ continue
+
+ if num_threads is not None:
+ lib_controller.set_num_threads(num_threads)
+
+
+class _ThreadpoolLimiterDecorator(_ThreadpoolLimiter, ContextDecorator):
+ """Same as _ThreadpoolLimiter but to be used as a decorator"""
+
+ def __init__(self, controller, *, limits=None, user_api=None):
+ self._limits, self._user_api, self._prefixes = self._check_params(
+ limits, user_api
+ )
+ self._controller = controller
+
+ def __enter__(self):
+ # we need to set the limits here and not in the __init__ because we want the
+ # limits to be set when calling the decorated function, not when creating the
+ # decorator.
+ self._original_info = self._controller.info()
+ self._set_threadpool_limits()
+ return self
+
+
+@_format_docstring(
+ USER_APIS=", ".join(f'"{api}"' for api in _ALL_USER_APIS),
+ BLAS_LIBS=", ".join(_ALL_BLAS_LIBRARIES),
+ OPENMP_LIBS=", ".join(_ALL_OPENMP_LIBRARIES),
+)
+class threadpool_limits(_ThreadpoolLimiter):
+ """Change the maximal number of threads that can be used in thread pools.
+
+ This object can be used either as a callable (the construction of this object
+ limits the number of threads), as a context manager in a `with` block to
+ automatically restore the original state of the controlled libraries when exiting
+ the block, or as a decorator through its `wrap` method.
+
+ Set the maximal number of threads that can be used in thread pools used in
+ the supported libraries to `limit`. This function works for libraries that
+ are already loaded in the interpreter and can be changed dynamically.
+
+ This effect is global and impacts the whole Python process. There is no thread level
+ isolation as these libraries do not offer thread-local APIs to configure the number
+ of threads to use in nested parallel calls.
+
+ Parameters
+ ----------
+ limits : int, dict, 'sequential_blas_under_openmp' or None (default=None)
+ The maximal number of threads that can be used in thread pools
+
+ - If int, sets the maximum number of threads to `limits` for each
+ library selected by `user_api`.
+
+ - If it is a dictionary `{{key: max_threads}}`, this function sets a
+ custom maximum number of threads for each `key` which can be either a
+ `user_api` or a `prefix` for a specific library.
+
+ - If 'sequential_blas_under_openmp', it will choose the appropriate `limits`
+ and `user_api` parameters for the specific use case of sequential BLAS
+ calls within an OpenMP parallel region. The `user_api` parameter is
+ ignored.
+
+ - If None, this function does not do anything.
+
+ user_api : {USER_APIS} or None (default=None)
+ APIs of libraries to limit. Used only if `limits` is an int.
+
+ - If "blas", it will only limit BLAS supported libraries ({BLAS_LIBS}).
+
+ - If "openmp", it will only limit OpenMP supported libraries
+ ({OPENMP_LIBS}). Note that it can affect the number of threads used
+ by the BLAS libraries if they rely on OpenMP.
+
+ - If None, this function will apply to all supported libraries.
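+
+ A short usage sketch (illustrative):
+
+ from threadpoolctl import threadpool_limits
+
+ with threadpool_limits(limits=2, user_api="blas"):
+ ... # BLAS calls here use at most 2 threads
+
+ @threadpool_limits.wrap(limits=1)
+ def single_threaded_section():
+ ... # limits are applied while the decorated function runs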
+ """
+
+ def __init__(self, limits=None, user_api=None):
+ super().__init__(ThreadpoolController(), limits=limits, user_api=user_api)
+
+ @classmethod
+ def wrap(cls, limits=None, user_api=None):
+ return super().wrap(ThreadpoolController(), limits=limits, user_api=user_api)
+
+
+class ThreadpoolController:
+ """Collection of LibController objects for all loaded supported libraries
+
+ Attributes
+ ----------
+ lib_controllers : list of `LibController` objects
+ The list of library controllers of all loaded supported libraries.
+ """
+
+ # Cache for libc under POSIX and a few system libraries under Windows.
+ # We use a class level cache instead of an instance level cache because
+ # it's very unlikely that a shared library will be unloaded and reloaded
+ # during the lifetime of a program.
+ _system_libraries = dict()
+
+ def __init__(self):
+ self.lib_controllers = []
+ self._load_libraries()
+ self._warn_if_incompatible_openmp()
+
+ @classmethod
+ def _from_controllers(cls, lib_controllers):
+ new_controller = cls.__new__(cls)
+ new_controller.lib_controllers = lib_controllers
+ return new_controller
+
+ def info(self):
+ """Return lib_controllers info as a list of dicts"""
+ return [lib_controller.info() for lib_controller in self.lib_controllers]
+
+ def select(self, **kwargs):
+ """Return a ThreadpoolController containing a subset of its current
+ library controllers
+
+ It will select all libraries matching at least one pair (key, value) from kwargs
+ where key is an entry of the library info dict (like "user_api", "internal_api",
+ "prefix", ...) and value is the value or a list of acceptable values for that
+ entry.
+
+ For instance, `ThreadpoolController().select(internal_api=["blis", "openblas"])`
+ will select all library controllers whose internal_api is either "blis" or
+ "openblas".
+ """
+ for key, vals in kwargs.items():
+ kwargs[key] = [vals] if not isinstance(vals, list) else vals
+
+ lib_controllers = [
+ lib_controller
+ for lib_controller in self.lib_controllers
+ if any(
+ getattr(lib_controller, key, None) in vals
+ for key, vals in kwargs.items()
+ )
+ ]
+
+ return ThreadpoolController._from_controllers(lib_controllers)
+
+ def _get_params_for_sequential_blas_under_openmp(self):
+ """Return appropriate params to use for a sequential BLAS call in an OpenMP loop
+
+ This function takes into account the unexpected behavior of OpenBLAS with the
+ OpenMP threading layer.
+ """
+ if self.select(
+ internal_api="openblas", threading_layer="openmp"
+ ).lib_controllers:
+ return {"limits": None, "user_api": None}
+ return {"limits": 1, "user_api": "blas"}
+
+ @_format_docstring(
+ USER_APIS=", ".join('"{}"'.format(api) for api in _ALL_USER_APIS),
+ BLAS_LIBS=", ".join(_ALL_BLAS_LIBRARIES),
+ OPENMP_LIBS=", ".join(_ALL_OPENMP_LIBRARIES),
+ )
+ def limit(self, *, limits=None, user_api=None):
+ """Change the maximal number of threads that can be used in thread pools.
+
+ This function returns an object that can be used either as a callable (the
+ construction of this object limits the number of threads) or as a context
+ manager, in a `with` block to automatically restore the original state of the
+ controlled libraries when exiting the block.
+
+ Set the maximal number of threads that can be used in thread pools used in
+ the supported libraries to `limits`. This function works for libraries that
+ are already loaded in the interpreter and can be changed dynamically.
+
+ This effect is global and impacts the whole Python process. There is no thread
+ level isolation as these libraries do not offer thread-local APIs to configure
+ the number of threads to use in nested parallel calls.
+
+ Parameters
+ ----------
+ limits : int, dict, 'sequential_blas_under_openmp' or None (default=None)
+ The maximal number of threads that can be used in thread pools
+
+ - If int, sets the maximum number of threads to `limits` for each
+ library selected by `user_api`.
+
+ - If it is a dictionary `{{key: max_threads}}`, this function sets a
+ custom maximum number of threads for each `key` which can be either a
+ `user_api` or a `prefix` for a specific library.
+
+ - If 'sequential_blas_under_openmp', it will choose the appropriate `limits`
+ and `user_api` parameters for the specific use case of sequential BLAS
+ calls within an OpenMP parallel region. The `user_api` parameter is
+ ignored.
+
+ - If None, this function does not do anything.
+
+ user_api : {USER_APIS} or None (default=None)
+ APIs of libraries to limit. Used only if `limits` is an int.
+
+ - If "blas", it will only limit BLAS supported libraries ({BLAS_LIBS}).
+
+ - If "openmp", it will only limit OpenMP supported libraries
+ ({OPENMP_LIBS}). Note that it can affect the number of threads used
+ by the BLAS libraries if they rely on OpenMP.
+
+ - If None, this function will apply to all supported libraries.
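+
+ A short usage sketch (illustrative):
+
+ controller = ThreadpoolController()
+ with controller.limit(limits=1, user_api="blas"):
+ ... # the previous limits are restored when the block exits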
+ """
+ return _ThreadpoolLimiter(self, limits=limits, user_api=user_api)
+
+ @_format_docstring(
+ USER_APIS=", ".join('"{}"'.format(api) for api in _ALL_USER_APIS),
+ BLAS_LIBS=", ".join(_ALL_BLAS_LIBRARIES),
+ OPENMP_LIBS=", ".join(_ALL_OPENMP_LIBRARIES),
+ )
+ def wrap(self, *, limits=None, user_api=None):
+ """Change the maximal number of threads that can be used in thread pools.
+
+ This function returns an object that can be used as a decorator.
+
+ Set the maximal number of threads that can be used in thread pools used in
+ the supported libraries to `limits`. This function works for libraries that
+ are already loaded in the interpreter and can be changed dynamically.
+
+ Parameters
+ ----------
+ limits : int, dict or None (default=None)
+ The maximal number of threads that can be used in thread pools
+
+ - If int, sets the maximum number of threads to `limits` for each
+ library selected by `user_api`.
+
+ - If it is a dictionary `{{key: max_threads}}`, this function sets a
+ custom maximum number of threads for each `key` which can be either a
+ `user_api` or a `prefix` for a specific library.
+
+ - If None, this function does not do anything.
+
+ user_api : {USER_APIS} or None (default=None)
+ APIs of libraries to limit. Used only if `limits` is an int.
+
+ - If "blas", it will only limit BLAS supported libraries ({BLAS_LIBS}).
+
+ - If "openmp", it will only limit OpenMP supported libraries
+ ({OPENMP_LIBS}). Note that it can affect the number of threads used
+ by the BLAS libraries if they rely on OpenMP.
+
+ - If None, this function will apply to all supported libraries.
+ """
+ return _ThreadpoolLimiter.wrap(self, limits=limits, user_api=user_api)
+
+ def __len__(self):
+ return len(self.lib_controllers)
+
+ def _load_libraries(self):
+ """Loop through loaded shared libraries and store the supported ones"""
+ if sys.platform == "darwin":
+ self._find_libraries_with_dyld()
+ elif sys.platform == "win32":
+ self._find_libraries_with_enum_process_module_ex()
+ elif "pyodide" in sys.modules:
+ self._find_libraries_pyodide()
+ else:
+ self._find_libraries_with_dl_iterate_phdr()
+
+ def _find_libraries_with_dl_iterate_phdr(self):
+ """Loop through loaded libraries and return binders on supported ones
+
+ This function is expected to work on POSIX system only.
+ This code is adapted from code by Intel developer @anton-malakhov
+ available at https://github.com/IntelPython/smp
+
+ Copyright (c) 2017, Intel Corporation published under the BSD 3-Clause
+ license
+ """
+ libc = self._get_libc()
+ if not hasattr(libc, "dl_iterate_phdr"): # pragma: no cover
+ warnings.warn(
+ "Could not find dl_iterate_phdr in the C standard library.",
+ RuntimeWarning,
+ )
+ return []
+
+ # Callback function for `dl_iterate_phdr` which is called for every
+ # library loaded in the current process until it returns 1.
+ def match_library_callback(info, size, data):
+ # Get the path of the current library
+ filepath = info.contents.dlpi_name
+ if filepath:
+ filepath = filepath.decode("utf-8")
+
+ # Store the library controller if it is supported and selected
+ self._make_controller_from_path(filepath)
+ return 0
+
+ c_func_signature = ctypes.CFUNCTYPE(
+ ctypes.c_int, # Return type
+ ctypes.POINTER(_dl_phdr_info),
+ ctypes.c_size_t,
+ ctypes.c_char_p,
+ )
+ c_match_library_callback = c_func_signature(match_library_callback)
+
+ data = ctypes.c_char_p(b"")
+ libc.dl_iterate_phdr(c_match_library_callback, data)
+
+ def _find_libraries_with_dyld(self):
+ """Loop through loaded libraries and return binders on supported ones
+
+ This function is expected to work on OSX system only
+ """
+ libc = self._get_libc()
+ if not hasattr(libc, "_dyld_image_count"): # pragma: no cover
+ warnings.warn(
+ "Could not find _dyld_image_count in the C standard library.",
+ RuntimeWarning,
+ )
+ return []
+
+ n_dyld = libc._dyld_image_count()
+ libc._dyld_get_image_name.restype = ctypes.c_char_p
+
+ for i in range(n_dyld):
+ filepath = ctypes.string_at(libc._dyld_get_image_name(i))
+ filepath = filepath.decode("utf-8")
+
+ # Store the library controller if it is supported and selected
+ self._make_controller_from_path(filepath)
+
+ def _find_libraries_with_enum_process_module_ex(self):
+ """Loop through loaded libraries and return binders on supported ones
+
+ This function is expected to work on windows system only.
+ This code is adapted from code by Philipp Hagemeister @phihag available
+ at https://stackoverflow.com/questions/17474574
+ """
+ from ctypes.wintypes import DWORD, HMODULE, MAX_PATH
+
+ PROCESS_QUERY_INFORMATION = 0x0400
+ PROCESS_VM_READ = 0x0010
+
+ LIST_LIBRARIES_ALL = 0x03
+
+ ps_api = self._get_windll("Psapi")
+ kernel_32 = self._get_windll("kernel32")
+
+ h_process = kernel_32.OpenProcess(
+ PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, False, os.getpid()
+ )
+ if not h_process: # pragma: no cover
+ raise OSError(f"Could not open PID {os.getpid()}")
+
+ try:
+ buf_count = 256
+ needed = DWORD()
+ # Grow the buffer until it becomes large enough to hold all the
+ # module headers
+ while True:
+ buf = (HMODULE * buf_count)()
+ buf_size = ctypes.sizeof(buf)
+ if not ps_api.EnumProcessModulesEx(
+ h_process,
+ ctypes.byref(buf),
+ buf_size,
+ ctypes.byref(needed),
+ LIST_LIBRARIES_ALL,
+ ):
+ raise OSError("EnumProcessModulesEx failed")
+ if buf_size >= needed.value:
+ break
+ buf_count = needed.value // (buf_size // buf_count)
+
+ count = needed.value // (buf_size // buf_count)
+ h_modules = map(HMODULE, buf[:count])
+
+ # Loop through all the module headers and get the library path
+ buf = ctypes.create_unicode_buffer(MAX_PATH)
+ n_size = DWORD()
+ for h_module in h_modules:
+ # Get the path of the current module
+ if not ps_api.GetModuleFileNameExW(
+ h_process, h_module, ctypes.byref(buf), ctypes.byref(n_size)
+ ):
+ raise OSError("GetModuleFileNameEx failed")
+ filepath = buf.value
+
+ # Store the library controller if it is supported and selected
+ self._make_controller_from_path(filepath)
+ finally:
+ kernel_32.CloseHandle(h_process)
+
+ def _find_libraries_pyodide(self):
+ """Pyodide specific implementation for finding loaded libraries.
+
+ Adapted from suggestion in https://github.com/joblib/threadpoolctl/pull/169#issuecomment-1946696449.
+
+ One day, we may have a simpler solution. libc dl_iterate_phdr needs to
+ be implemented in Emscripten and exposed in Pyodide, see
+ https://github.com/emscripten-core/emscripten/issues/21354 for more
+ details.
+ """
+ try:
+ from pyodide_js._module import LDSO
+ except ImportError:
+ warnings.warn(
+ "Unable to import LDSO from pyodide_js._module. This should never "
+ "happen."
+ )
+ return
+
+ for filepath in LDSO.loadedLibsByName.as_object_map():
+ # Some libraries are duplicated by Pyodide and do not exist in the
+ # filesystem, so we first check for the existence of the file. For
+ # more details, see
+ # https://github.com/joblib/threadpoolctl/pull/169#issuecomment-1947946728
+ if os.path.exists(filepath):
+ self._make_controller_from_path(filepath)
+
+ def _make_controller_from_path(self, filepath):
+ """Store a library controller if it is supported and selected"""
+ # Required to resolve symlinks
+ filepath = _realpath(filepath)
+ # `lower` required to take into account the case of the OpenMP dll on Windows
+ # (vcomp, VCOMP, Vcomp, ...)
+ filename = os.path.basename(filepath).lower()
+
+ # Loop through supported libraries to find if this filename corresponds
+ # to a supported one.
+ for controller_class in _ALL_CONTROLLERS:
+ # check if filename matches a supported prefix
+ prefix = self._check_prefix(filename, controller_class.filename_prefixes)
+
+ # filename does not match any of the prefixes of the candidate
+ # library. move to next library.
+ if prefix is None:
+ continue
+
+ # Workaround for BLAS libraries packaged by conda-forge on Windows, which
+ # are all renamed "libblas.dll". We thus have to check which BLAS
+ # implementation it actually corresponds to by looking for
+ # implementation-specific symbols.
+ if prefix == "libblas":
+ if filename.endswith(".dll"):
+ libblas = ctypes.CDLL(filepath, _RTLD_NOLOAD)
+ if not any(
+ hasattr(libblas, func)
+ for func in controller_class.check_symbols
+ ):
+ continue
+ else:
+ # We ignore libblas on platforms other than Windows because there
+ # might be a libblas dso coming with openblas, for instance, that
+ # can't be used to instantiate a pertinent LibController (many
+ # symbols are missing) and would create confusion by making a
+ # duplicate entry in threadpool_info.
+ continue
+
+ # The filename matches a prefix. Now we check if the library has the symbols
+ # we are looking for. If none of the symbols exist, it's very likely not the
+ # expected library (e.g. a library having a common prefix with one of our
+ # supported libraries). Otherwise, create and store the library
+ # controller.
+ lib_controller = controller_class(
+ filepath=filepath, prefix=prefix, parent=self
+ )
+
+ if filepath in (lib.filepath for lib in self.lib_controllers):
+ # We already have a controller for this library.
+ continue
+
+ if not hasattr(controller_class, "check_symbols") or any(
+ hasattr(lib_controller.dynlib, func)
+ for func in controller_class.check_symbols
+ ):
+ self.lib_controllers.append(lib_controller)
+
+ def _check_prefix(self, library_basename, filename_prefixes):
+ """Return the prefix library_basename starts with
+
+ Return None if none matches.
+ """
+ for prefix in filename_prefixes:
+ if library_basename.startswith(prefix):
+ return prefix
+ return None
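+
+ # A minimal illustration of the prefix matching above, with hypothetical inputs:
+ # _check_prefix("libomp.so.5", ("libiomp", "libomp")) returns "libomp",
+ # while _check_prefix("libfoo.so", ("libiomp", "libomp")) returns None.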
+
+ def _warn_if_incompatible_openmp(self):
+ """Raise a warning if llvm-OpenMP and intel-OpenMP are both loaded"""
+ prefixes = [lib_controller.prefix for lib_controller in self.lib_controllers]
+ msg = textwrap.dedent(
+ """
+ Found Intel OpenMP ('libiomp') and LLVM OpenMP ('libomp') loaded at
+ the same time. Both libraries are known to be incompatible and this
+ can cause random crashes or deadlocks on Linux when loaded in the
+ same Python program.
+ Using threadpoolctl may cause crashes or deadlocks. For more
+ information and possible workarounds, please see
+ https://github.com/joblib/threadpoolctl/blob/master/multiple_openmp.md
+ """
+ )
+ if "libomp" in prefixes and "libiomp" in prefixes:
+ warnings.warn(msg, RuntimeWarning)
+
+ @classmethod
+ def _get_libc(cls):
+ """Load the lib-C for unix systems."""
+ libc = cls._system_libraries.get("libc")
+ if libc is None:
+ # Remark: If libc is statically linked or if Python is linked against an
+ # alternative implementation of libc like musl, find_library will return
+ # None and CDLL will load the main program itself which should contain the
+ # libc symbols. We still name it libc for convenience.
+ # If the main program does not contain the libc symbols, it's ok because
+ # we check their presence later anyway.
+ libc = ctypes.CDLL(find_library("c"), mode=_RTLD_NOLOAD)
+ cls._system_libraries["libc"] = libc
+ return libc
+
+ @classmethod
+ def _get_windll(cls, dll_name):
+ """Load a windows DLL"""
+ dll = cls._system_libraries.get(dll_name)
+ if dll is None:
+ dll = ctypes.WinDLL(f"{dll_name}.dll")
+ cls._system_libraries[dll_name] = dll
+ return dll
+
+
+def _main():
+ """Commandline interface to display thread-pool information and exit."""
+ import argparse
+ import importlib
+ import json
+ import sys
+
+ parser = argparse.ArgumentParser(
+ usage="python -m threadpoolctl -i numpy scipy.linalg xgboost",
+ description="Display thread-pool information and exit.",
+ )
+ parser.add_argument(
+ "-i",
+ "--import",
+ dest="modules",
+ nargs="*",
+ default=(),
+ help="Python modules to import before introspecting thread-pools.",
+ )
+ parser.add_argument(
+ "-c",
+ "--command",
+ help="a Python statement to execute before introspecting thread-pools.",
+ )
+
+ options = parser.parse_args(sys.argv[1:])
+ for module in options.modules:
+ try:
+ importlib.import_module(module, package=None)
+ except ImportError:
+ print("WARNING: could not import", module, file=sys.stderr)
+
+ if options.command:
+ exec(options.command)
+
+ print(json.dumps(threadpool_info(), indent=2))
+
+
+if __name__ == "__main__":
+ _main()
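+
+ # Usage sketch for the command-line entry point above (illustrative only):
+ #
+ #   python -m threadpoolctl -i numpy scipy.linalg
+ #   python -m threadpoolctl -c "import numpy"
+ #
+ # Both invocations print the JSON produced by threadpool_info() and exit.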
diff --git a/env-llmeval/lib/python3.10/site-packages/typing_extensions.py b/env-llmeval/lib/python3.10/site-packages/typing_extensions.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ccd519ce4f16345bd77a0eae03deaae16bf4062
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/typing_extensions.py
@@ -0,0 +1,3332 @@
+import abc
+import collections
+import collections.abc
+import functools
+import inspect
+import operator
+import sys
+import types as _types
+import typing
+import warnings
+
+__all__ = [
+ # Super-special typing primitives.
+ 'Any',
+ 'ClassVar',
+ 'Concatenate',
+ 'Final',
+ 'LiteralString',
+ 'ParamSpec',
+ 'ParamSpecArgs',
+ 'ParamSpecKwargs',
+ 'Self',
+ 'Type',
+ 'TypeVar',
+ 'TypeVarTuple',
+ 'Unpack',
+
+ # ABCs (from collections.abc).
+ 'Awaitable',
+ 'AsyncIterator',
+ 'AsyncIterable',
+ 'Coroutine',
+ 'AsyncGenerator',
+ 'AsyncContextManager',
+ 'Buffer',
+ 'ChainMap',
+
+ # Concrete collection types.
+ 'ContextManager',
+ 'Counter',
+ 'Deque',
+ 'DefaultDict',
+ 'NamedTuple',
+ 'OrderedDict',
+ 'TypedDict',
+
+ # Structural checks, a.k.a. protocols.
+ 'SupportsAbs',
+ 'SupportsBytes',
+ 'SupportsComplex',
+ 'SupportsFloat',
+ 'SupportsIndex',
+ 'SupportsInt',
+ 'SupportsRound',
+
+ # One-off things.
+ 'Annotated',
+ 'assert_never',
+ 'assert_type',
+ 'clear_overloads',
+ 'dataclass_transform',
+ 'deprecated',
+ 'Doc',
+ 'get_overloads',
+ 'final',
+ 'get_args',
+ 'get_origin',
+ 'get_original_bases',
+ 'get_protocol_members',
+ 'get_type_hints',
+ 'IntVar',
+ 'is_protocol',
+ 'is_typeddict',
+ 'Literal',
+ 'NewType',
+ 'overload',
+ 'override',
+ 'Protocol',
+ 'reveal_type',
+ 'runtime',
+ 'runtime_checkable',
+ 'Text',
+ 'TypeAlias',
+ 'TypeAliasType',
+ 'TypeGuard',
+ 'TypeIs',
+ 'TYPE_CHECKING',
+ 'Never',
+ 'NoReturn',
+ 'ReadOnly',
+ 'Required',
+ 'NotRequired',
+
+ # Pure aliases, have always been in typing
+ 'AbstractSet',
+ 'AnyStr',
+ 'BinaryIO',
+ 'Callable',
+ 'Collection',
+ 'Container',
+ 'Dict',
+ 'ForwardRef',
+ 'FrozenSet',
+ 'Generator',
+ 'Generic',
+ 'Hashable',
+ 'IO',
+ 'ItemsView',
+ 'Iterable',
+ 'Iterator',
+ 'KeysView',
+ 'List',
+ 'Mapping',
+ 'MappingView',
+ 'Match',
+ 'MutableMapping',
+ 'MutableSequence',
+ 'MutableSet',
+ 'Optional',
+ 'Pattern',
+ 'Reversible',
+ 'Sequence',
+ 'Set',
+ 'Sized',
+ 'TextIO',
+ 'Tuple',
+ 'Union',
+ 'ValuesView',
+ 'cast',
+ 'no_type_check',
+ 'no_type_check_decorator',
+]
+
+# for backward compatibility
+PEP_560 = True
+GenericMeta = type
+
+# The functions below are modified copies of typing internal helpers.
+# They are needed by _ProtocolMeta and they provide support for PEP 646.
+
+
+class _Sentinel:
+ def __repr__(self):
+ return "<sentinel>"
+
+
+_marker = _Sentinel()
+
+
+if sys.version_info >= (3, 10):
+ def _should_collect_from_parameters(t):
+ return isinstance(
+ t, (typing._GenericAlias, _types.GenericAlias, _types.UnionType)
+ )
+elif sys.version_info >= (3, 9):
+ def _should_collect_from_parameters(t):
+ return isinstance(t, (typing._GenericAlias, _types.GenericAlias))
+else:
+ def _should_collect_from_parameters(t):
+ return isinstance(t, typing._GenericAlias) and not t._special
+
+
+NoReturn = typing.NoReturn
+
+# Some unconstrained type variables. These are used by the container types.
+# (These are not for export.)
+T = typing.TypeVar('T') # Any type.
+KT = typing.TypeVar('KT') # Key type.
+VT = typing.TypeVar('VT') # Value type.
+T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers.
+T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant.
+
+
+if sys.version_info >= (3, 11):
+ from typing import Any
+else:
+
+ class _AnyMeta(type):
+ def __instancecheck__(self, obj):
+ if self is Any:
+ raise TypeError("typing_extensions.Any cannot be used with isinstance()")
+ return super().__instancecheck__(obj)
+
+ def __repr__(self):
+ if self is Any:
+ return "typing_extensions.Any"
+ return super().__repr__()
+
+ class Any(metaclass=_AnyMeta):
+ """Special type indicating an unconstrained type.
+ - Any is compatible with every type.
+ - Any assumed to have all methods.
+ - All values assumed to be instances of Any.
+ Note that all the above statements are true from the point of view of
+ static type checkers. At runtime, Any should not be used with instance
+ checks.
+ """
+ def __new__(cls, *args, **kwargs):
+ if cls is Any:
+ raise TypeError("Any cannot be instantiated")
+ return super().__new__(cls, *args, **kwargs)
+
+
+ClassVar = typing.ClassVar
+
+
+class _ExtensionsSpecialForm(typing._SpecialForm, _root=True):
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+
+Final = typing.Final
+
+if sys.version_info >= (3, 11):
+ final = typing.final
+else:
+ # @final exists in 3.8+, but we backport it for all versions
+ # before 3.11 to keep support for the __final__ attribute.
+ # See https://bugs.python.org/issue46342
+ def final(f):
+ """This decorator can be used to indicate to type checkers that
+ the decorated method cannot be overridden, and decorated class
+ cannot be subclassed. For example:
+
+ class Base:
+ @final
+ def done(self) -> None:
+ ...
+ class Sub(Base):
+ def done(self) -> None: # Error reported by type checker
+ ...
+ @final
+ class Leaf:
+ ...
+ class Other(Leaf): # Error reported by type checker
+ ...
+
+ There is no runtime checking of these properties. The decorator
+ sets the ``__final__`` attribute to ``True`` on the decorated object
+ to allow runtime introspection.
+ """
+ try:
+ f.__final__ = True
+ except (AttributeError, TypeError):
+ # Skip the attribute silently if it is not writable.
+ # AttributeError happens if the object has __slots__ or a
+ # read-only property, TypeError if it's a builtin class.
+ pass
+ return f
+
+
+def IntVar(name):
+ return typing.TypeVar(name)
+
+
+# A Literal bug was fixed in 3.11.0, 3.10.1 and 3.9.8
+if sys.version_info >= (3, 10, 1):
+ Literal = typing.Literal
+else:
+ def _flatten_literal_params(parameters):
+ """An internal helper for Literal creation: flatten Literals among parameters"""
+ params = []
+ for p in parameters:
+ if isinstance(p, _LiteralGenericAlias):
+ params.extend(p.__args__)
+ else:
+ params.append(p)
+ return tuple(params)
+
+ def _value_and_type_iter(params):
+ for p in params:
+ yield p, type(p)
+
+ class _LiteralGenericAlias(typing._GenericAlias, _root=True):
+ def __eq__(self, other):
+ if not isinstance(other, _LiteralGenericAlias):
+ return NotImplemented
+ these_args_deduped = set(_value_and_type_iter(self.__args__))
+ other_args_deduped = set(_value_and_type_iter(other.__args__))
+ return these_args_deduped == other_args_deduped
+
+ def __hash__(self):
+ return hash(frozenset(_value_and_type_iter(self.__args__)))
+
+ class _LiteralForm(_ExtensionsSpecialForm, _root=True):
+ def __init__(self, doc: str):
+ self._name = 'Literal'
+ self._doc = self.__doc__ = doc
+
+ def __getitem__(self, parameters):
+ if not isinstance(parameters, tuple):
+ parameters = (parameters,)
+
+ parameters = _flatten_literal_params(parameters)
+
+ val_type_pairs = list(_value_and_type_iter(parameters))
+ try:
+ deduped_pairs = set(val_type_pairs)
+ except TypeError:
+ # unhashable parameters
+ pass
+ else:
+ # similar logic to typing._deduplicate on Python 3.9+
+ if len(deduped_pairs) < len(val_type_pairs):
+ new_parameters = []
+ for pair in val_type_pairs:
+ if pair in deduped_pairs:
+ new_parameters.append(pair[0])
+ deduped_pairs.remove(pair)
+ assert not deduped_pairs, deduped_pairs
+ parameters = tuple(new_parameters)
+
+ return _LiteralGenericAlias(self, parameters)
+
+ Literal = _LiteralForm(doc="""\
+ A type that can be used to indicate to type checkers
+ that the corresponding value has a value literally equivalent
+ to the provided parameter. For example:
+
+ var: Literal[4] = 4
+
+ The type checker understands that 'var' is literally equal to
+ the value 4 and no other value.
+
+ Literal[...] cannot be subclassed. There is no runtime
+ checking verifying that the parameter is actually a value
+ instead of a type.""")
+
+
+_overload_dummy = typing._overload_dummy
+
+
+if hasattr(typing, "get_overloads"): # 3.11+
+ overload = typing.overload
+ get_overloads = typing.get_overloads
+ clear_overloads = typing.clear_overloads
+else:
+ # {module: {qualname: {firstlineno: func}}}
+ _overload_registry = collections.defaultdict(
+ functools.partial(collections.defaultdict, dict)
+ )
+
+ def overload(func):
+ """Decorator for overloaded functions/methods.
+
+ In a stub file, place two or more stub definitions for the same
+ function in a row, each decorated with @overload. For example:
+
+ @overload
+ def utf8(value: None) -> None: ...
+ @overload
+ def utf8(value: bytes) -> bytes: ...
+ @overload
+ def utf8(value: str) -> bytes: ...
+
+ In a non-stub file (i.e. a regular .py file), do the same but
+ follow it with an implementation. The implementation should *not*
+ be decorated with @overload. For example:
+
+ @overload
+ def utf8(value: None) -> None: ...
+ @overload
+ def utf8(value: bytes) -> bytes: ...
+ @overload
+ def utf8(value: str) -> bytes: ...
+ def utf8(value):
+ # implementation goes here
+
+ The overloads for a function can be retrieved at runtime using the
+ get_overloads() function.
+ """
+ # classmethod and staticmethod
+ f = getattr(func, "__func__", func)
+ try:
+ _overload_registry[f.__module__][f.__qualname__][
+ f.__code__.co_firstlineno
+ ] = func
+ except AttributeError:
+ # Not a normal function; ignore.
+ pass
+ return _overload_dummy
+
+ def get_overloads(func):
+ """Return all defined overloads for *func* as a sequence."""
+ # classmethod and staticmethod
+ f = getattr(func, "__func__", func)
+ if f.__module__ not in _overload_registry:
+ return []
+ mod_dict = _overload_registry[f.__module__]
+ if f.__qualname__ not in mod_dict:
+ return []
+ return list(mod_dict[f.__qualname__].values())
+
+ def clear_overloads():
+ """Clear all overloads in the registry."""
+ _overload_registry.clear()
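+
+ # A short sketch of how the registry-based backport above can be used
+ # (assumed usage, mirroring the overload() docstring):
+ #
+ #   @overload
+ #   def utf8(value: str) -> bytes: ...
+ #   def utf8(value):
+ #       return value.encode()
+ #
+ #   get_overloads(utf8)  # -> [the @overload-decorated stub]
+ #   clear_overloads()    # empties the registry again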
+
+
+# This is not a real generic class. Don't use outside annotations.
+Type = typing.Type
+
+# Various ABCs mimicking those in collections.abc.
+# A few are simply re-exported for completeness.
+Awaitable = typing.Awaitable
+Coroutine = typing.Coroutine
+AsyncIterable = typing.AsyncIterable
+AsyncIterator = typing.AsyncIterator
+Deque = typing.Deque
+ContextManager = typing.ContextManager
+AsyncContextManager = typing.AsyncContextManager
+DefaultDict = typing.DefaultDict
+OrderedDict = typing.OrderedDict
+Counter = typing.Counter
+ChainMap = typing.ChainMap
+AsyncGenerator = typing.AsyncGenerator
+Text = typing.Text
+TYPE_CHECKING = typing.TYPE_CHECKING
+
+
+_PROTO_ALLOWLIST = {
+ 'collections.abc': [
+ 'Callable', 'Awaitable', 'Iterable', 'Iterator', 'AsyncIterable',
+ 'Hashable', 'Sized', 'Container', 'Collection', 'Reversible', 'Buffer',
+ ],
+ 'contextlib': ['AbstractContextManager', 'AbstractAsyncContextManager'],
+ 'typing_extensions': ['Buffer'],
+}
+
+
+_EXCLUDED_ATTRS = {
+ "__abstractmethods__", "__annotations__", "__weakref__", "_is_protocol",
+ "_is_runtime_protocol", "__dict__", "__slots__", "__parameters__",
+ "__orig_bases__", "__module__", "_MutableMapping__marker", "__doc__",
+ "__subclasshook__", "__orig_class__", "__init__", "__new__",
+ "__protocol_attrs__", "__non_callable_proto_members__",
+ "__match_args__",
+}
+
+if sys.version_info >= (3, 9):
+ _EXCLUDED_ATTRS.add("__class_getitem__")
+
+if sys.version_info >= (3, 12):
+ _EXCLUDED_ATTRS.add("__type_params__")
+
+_EXCLUDED_ATTRS = frozenset(_EXCLUDED_ATTRS)
+
+
+def _get_protocol_attrs(cls):
+ attrs = set()
+ for base in cls.__mro__[:-1]: # without object
+ if base.__name__ in {'Protocol', 'Generic'}:
+ continue
+ annotations = getattr(base, '__annotations__', {})
+ for attr in (*base.__dict__, *annotations):
+ if (not attr.startswith('_abc_') and attr not in _EXCLUDED_ATTRS):
+ attrs.add(attr)
+ return attrs
+
+
+def _caller(depth=2):
+ try:
+ return sys._getframe(depth).f_globals.get('__name__', '__main__')
+ except (AttributeError, ValueError): # For platforms without _getframe()
+ return None
+
+
+# `__match_args__` attribute was removed from protocol members in 3.13,
+# we want to backport this change to older Python versions.
+if sys.version_info >= (3, 13):
+ Protocol = typing.Protocol
+else:
+ def _allow_reckless_class_checks(depth=3):
+ """Allow instance and class checks for special stdlib modules.
+ The abc and functools modules indiscriminately call isinstance() and
+ issubclass() on the whole MRO of a user class, which may contain protocols.
+ """
+ return _caller(depth) in {'abc', 'functools', None}
+
+ def _no_init(self, *args, **kwargs):
+ if type(self)._is_protocol:
+ raise TypeError('Protocols cannot be instantiated')
+
+ def _type_check_issubclass_arg_1(arg):
+ """Raise TypeError if `arg` is not an instance of `type`
+ in `issubclass(arg, <protocol>)`.
+
+ In most cases, this is verified by type.__subclasscheck__.
+ Checking it again unnecessarily would slow down issubclass() checks,
+ so we don't perform this check unless we absolutely have to.
+
+ For various error paths, however,
+ we want to ensure that *this* error message is shown to the user
+ where relevant, rather than a typing.py-specific error message.
+ """
+ if not isinstance(arg, type):
+ # Same error message as for issubclass(1, int).
+ raise TypeError('issubclass() arg 1 must be a class')
+
+ # Inheriting from typing._ProtocolMeta isn't actually desirable,
+ # but is necessary to allow typing.Protocol and typing_extensions.Protocol
+ # to mix without getting TypeErrors about "metaclass conflict"
+ class _ProtocolMeta(type(typing.Protocol)):
+ # This metaclass is somewhat unfortunate,
+ # but is necessary for several reasons...
+ #
+ # NOTE: DO NOT call super() in any methods in this class
+ # That would call the methods on typing._ProtocolMeta on Python 3.8-3.11
+ # and those are slow
+ def __new__(mcls, name, bases, namespace, **kwargs):
+ if name == "Protocol" and len(bases) < 2:
+ pass
+ elif {Protocol, typing.Protocol} & set(bases):
+ for base in bases:
+ if not (
+ base in {object, typing.Generic, Protocol, typing.Protocol}
+ or base.__name__ in _PROTO_ALLOWLIST.get(base.__module__, [])
+ or is_protocol(base)
+ ):
+ raise TypeError(
+ f"Protocols can only inherit from other protocols, "
+ f"got {base!r}"
+ )
+ return abc.ABCMeta.__new__(mcls, name, bases, namespace, **kwargs)
+
+ def __init__(cls, *args, **kwargs):
+ abc.ABCMeta.__init__(cls, *args, **kwargs)
+ if getattr(cls, "_is_protocol", False):
+ cls.__protocol_attrs__ = _get_protocol_attrs(cls)
+
+ def __subclasscheck__(cls, other):
+ if cls is Protocol:
+ return type.__subclasscheck__(cls, other)
+ if (
+ getattr(cls, '_is_protocol', False)
+ and not _allow_reckless_class_checks()
+ ):
+ if not getattr(cls, '_is_runtime_protocol', False):
+ _type_check_issubclass_arg_1(other)
+ raise TypeError(
+ "Instance and class checks can only be used with "
+ "@runtime_checkable protocols"
+ )
+ if (
+ # this attribute is set by @runtime_checkable:
+ cls.__non_callable_proto_members__
+ and cls.__dict__.get("__subclasshook__") is _proto_hook
+ ):
+ _type_check_issubclass_arg_1(other)
+ non_method_attrs = sorted(cls.__non_callable_proto_members__)
+ raise TypeError(
+ "Protocols with non-method members don't support issubclass()."
+ f" Non-method members: {str(non_method_attrs)[1:-1]}."
+ )
+ return abc.ABCMeta.__subclasscheck__(cls, other)
+
+ def __instancecheck__(cls, instance):
+ # We need this method for situations where attributes are
+ # assigned in __init__.
+ if cls is Protocol:
+ return type.__instancecheck__(cls, instance)
+ if not getattr(cls, "_is_protocol", False):
+ # i.e., it's a concrete subclass of a protocol
+ return abc.ABCMeta.__instancecheck__(cls, instance)
+
+ if (
+ not getattr(cls, '_is_runtime_protocol', False) and
+ not _allow_reckless_class_checks()
+ ):
+ raise TypeError("Instance and class checks can only be used with"
+ " @runtime_checkable protocols")
+
+ if abc.ABCMeta.__instancecheck__(cls, instance):
+ return True
+
+ for attr in cls.__protocol_attrs__:
+ try:
+ val = inspect.getattr_static(instance, attr)
+ except AttributeError:
+ break
+ # this attribute is set by @runtime_checkable:
+ if val is None and attr not in cls.__non_callable_proto_members__:
+ break
+ else:
+ return True
+
+ return False
+
+ def __eq__(cls, other):
+ # Hack so that typing.Generic.__class_getitem__
+ # treats typing_extensions.Protocol
+ # as equivalent to typing.Protocol
+ if abc.ABCMeta.__eq__(cls, other) is True:
+ return True
+ return cls is Protocol and other is typing.Protocol
+
+ # This has to be defined, or the abc-module cache
+ # complains about classes with this metaclass being unhashable,
+ # if we define only __eq__!
+ def __hash__(cls) -> int:
+ return type.__hash__(cls)
+
+ @classmethod
+ def _proto_hook(cls, other):
+ if not cls.__dict__.get('_is_protocol', False):
+ return NotImplemented
+
+ for attr in cls.__protocol_attrs__:
+ for base in other.__mro__:
+ # Check if the member appears in the class dictionary...
+ if attr in base.__dict__:
+ if base.__dict__[attr] is None:
+ return NotImplemented
+ break
+
+ # ...or in annotations, if it is a sub-protocol.
+ annotations = getattr(base, '__annotations__', {})
+ if (
+ isinstance(annotations, collections.abc.Mapping)
+ and attr in annotations
+ and is_protocol(other)
+ ):
+ break
+ else:
+ return NotImplemented
+ return True
+
+ class Protocol(typing.Generic, metaclass=_ProtocolMeta):
+ __doc__ = typing.Protocol.__doc__
+ __slots__ = ()
+ _is_protocol = True
+ _is_runtime_protocol = False
+
+ def __init_subclass__(cls, *args, **kwargs):
+ super().__init_subclass__(*args, **kwargs)
+
+ # Determine if this is a protocol or a concrete subclass.
+ if not cls.__dict__.get('_is_protocol', False):
+ cls._is_protocol = any(b is Protocol for b in cls.__bases__)
+
+ # Set (or override) the protocol subclass hook.
+ if '__subclasshook__' not in cls.__dict__:
+ cls.__subclasshook__ = _proto_hook
+
+ # Prohibit instantiation for protocol classes
+ if cls._is_protocol and cls.__init__ is Protocol.__init__:
+ cls.__init__ = _no_init
+
+
+if sys.version_info >= (3, 13):
+ runtime_checkable = typing.runtime_checkable
+else:
+ def runtime_checkable(cls):
+ """Mark a protocol class as a runtime protocol.
+
+ Such a protocol can be used with isinstance() and issubclass().
+ Raise TypeError if applied to a non-protocol class.
+ This allows a simple-minded structural check very similar to
+ one-trick ponies in collections.abc such as Iterable.
+
+ For example::
+
+ @runtime_checkable
+ class Closable(Protocol):
+ def close(self): ...
+
+ assert isinstance(open('/some/file'), Closable)
+
+ Warning: this will check only the presence of the required methods,
+ not their type signatures!
+ """
+ if not issubclass(cls, typing.Generic) or not getattr(cls, '_is_protocol', False):
+ raise TypeError('@runtime_checkable can be only applied to protocol classes,'
+ ' got %r' % cls)
+ cls._is_runtime_protocol = True
+
+ # Only execute the following block if it's a typing_extensions.Protocol class.
+ # typing.Protocol classes don't need it.
+ if isinstance(cls, _ProtocolMeta):
+ # PEP 544 prohibits using issubclass()
+ # with protocols that have non-method members.
+ # See gh-113320 for why we compute this attribute here,
+ # rather than in `_ProtocolMeta.__init__`
+ cls.__non_callable_proto_members__ = set()
+ for attr in cls.__protocol_attrs__:
+ try:
+ is_callable = callable(getattr(cls, attr, None))
+ except Exception as e:
+ raise TypeError(
+ f"Failed to determine whether protocol member {attr!r} "
+ "is a method member"
+ ) from e
+ else:
+ if not is_callable:
+ cls.__non_callable_proto_members__.add(attr)
+
+ return cls
+
+
+# The "runtime" alias exists for backwards compatibility.
+runtime = runtime_checkable
+
+
+# Our version of runtime-checkable protocols is faster on Python 3.8-3.11
+if sys.version_info >= (3, 12):
+ SupportsInt = typing.SupportsInt
+ SupportsFloat = typing.SupportsFloat
+ SupportsComplex = typing.SupportsComplex
+ SupportsBytes = typing.SupportsBytes
+ SupportsIndex = typing.SupportsIndex
+ SupportsAbs = typing.SupportsAbs
+ SupportsRound = typing.SupportsRound
+else:
+ @runtime_checkable
+ class SupportsInt(Protocol):
+ """An ABC with one abstract method __int__."""
+ __slots__ = ()
+
+ @abc.abstractmethod
+ def __int__(self) -> int:
+ pass
+
+ @runtime_checkable
+ class SupportsFloat(Protocol):
+ """An ABC with one abstract method __float__."""
+ __slots__ = ()
+
+ @abc.abstractmethod
+ def __float__(self) -> float:
+ pass
+
+ @runtime_checkable
+ class SupportsComplex(Protocol):
+ """An ABC with one abstract method __complex__."""
+ __slots__ = ()
+
+ @abc.abstractmethod
+ def __complex__(self) -> complex:
+ pass
+
+ @runtime_checkable
+ class SupportsBytes(Protocol):
+ """An ABC with one abstract method __bytes__."""
+ __slots__ = ()
+
+ @abc.abstractmethod
+ def __bytes__(self) -> bytes:
+ pass
+
+ @runtime_checkable
+ class SupportsIndex(Protocol):
+ __slots__ = ()
+
+ @abc.abstractmethod
+ def __index__(self) -> int:
+ pass
+
+ @runtime_checkable
+ class SupportsAbs(Protocol[T_co]):
+ """
+ An ABC with one abstract method __abs__ that is covariant in its return type.
+ """
+ __slots__ = ()
+
+ @abc.abstractmethod
+ def __abs__(self) -> T_co:
+ pass
+
+ @runtime_checkable
+ class SupportsRound(Protocol[T_co]):
+ """
+ An ABC with one abstract method __round__ that is covariant in its return type.
+ """
+ __slots__ = ()
+
+ @abc.abstractmethod
+ def __round__(self, ndigits: int = 0) -> T_co:
+ pass
+
+
+def _ensure_subclassable(mro_entries):
+ def inner(func):
+ if sys.implementation.name == "pypy" and sys.version_info < (3, 9):
+ cls_dict = {
+ "__call__": staticmethod(func),
+ "__mro_entries__": staticmethod(mro_entries)
+ }
+ t = type(func.__name__, (), cls_dict)
+ return functools.update_wrapper(t(), func)
+ else:
+ func.__mro_entries__ = mro_entries
+ return func
+ return inner
+
+
+# Update this to something like >=3.13.0b1 if and when
+# PEP 728 is implemented in CPython
+_PEP_728_IMPLEMENTED = False
+
+if _PEP_728_IMPLEMENTED:
+ # The standard library TypedDict in Python 3.8 does not store runtime information
+ # about which (if any) keys are optional. See https://bugs.python.org/issue38834
+ # The standard library TypedDict in Python 3.9.0/1 does not honour the "total"
+ # keyword with old-style TypedDict(). See https://bugs.python.org/issue42059
+ # The standard library TypedDict below Python 3.11 does not store runtime
+ # information about optional and required keys when using Required or NotRequired.
+ # Generic TypedDicts are also impossible using typing.TypedDict on Python <3.11.
+ # Aaaand on 3.12 we add __orig_bases__ to TypedDict
+ # to enable better runtime introspection.
+ # On 3.13 we deprecate some odd ways of creating TypedDicts.
+ # Also on 3.13, PEP 705 adds the ReadOnly[] qualifier.
+ # PEP 728 (still pending) makes more changes.
+ TypedDict = typing.TypedDict
+ _TypedDictMeta = typing._TypedDictMeta
+ is_typeddict = typing.is_typeddict
+else:
+ # 3.10.0 and later
+ _TAKES_MODULE = "module" in inspect.signature(typing._type_check).parameters
+
+ def _get_typeddict_qualifiers(annotation_type):
+ while True:
+ annotation_origin = get_origin(annotation_type)
+ if annotation_origin is Annotated:
+ annotation_args = get_args(annotation_type)
+ if annotation_args:
+ annotation_type = annotation_args[0]
+ else:
+ break
+ elif annotation_origin is Required:
+ yield Required
+ annotation_type, = get_args(annotation_type)
+ elif annotation_origin is NotRequired:
+ yield NotRequired
+ annotation_type, = get_args(annotation_type)
+ elif annotation_origin is ReadOnly:
+ yield ReadOnly
+ annotation_type, = get_args(annotation_type)
+ else:
+ break
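+
+ # For instance (a sketch of the unwrapping above, assuming PEP 655/705 forms):
+ # list(_get_typeddict_qualifiers(NotRequired[ReadOnly[int]])) yields
+ # [NotRequired, ReadOnly], while list(_get_typeddict_qualifiers(int)) is [].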
+
+ class _TypedDictMeta(type):
+ def __new__(cls, name, bases, ns, *, total=True, closed=False):
+ """Create new typed dict class object.
+
+ This method is called when TypedDict is subclassed,
+ or when TypedDict is instantiated. This way
+ TypedDict supports all three syntax forms described in its docstring.
+ Subclasses and instances of TypedDict return actual dictionaries.
+ """
+ for base in bases:
+ if type(base) is not _TypedDictMeta and base is not typing.Generic:
+ raise TypeError('cannot inherit from both a TypedDict type '
+ 'and a non-TypedDict base class')
+
+ if any(issubclass(b, typing.Generic) for b in bases):
+ generic_base = (typing.Generic,)
+ else:
+ generic_base = ()
+
+ # typing.py generally doesn't let you inherit from plain Generic, unless
+ # the name of the class happens to be "Protocol"
+ tp_dict = type.__new__(_TypedDictMeta, "Protocol", (*generic_base, dict), ns)
+ tp_dict.__name__ = name
+ if tp_dict.__qualname__ == "Protocol":
+ tp_dict.__qualname__ = name
+
+ if not hasattr(tp_dict, '__orig_bases__'):
+ tp_dict.__orig_bases__ = bases
+
+ annotations = {}
+ own_annotations = ns.get('__annotations__', {})
+ msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
+ if _TAKES_MODULE:
+ own_annotations = {
+ n: typing._type_check(tp, msg, module=tp_dict.__module__)
+ for n, tp in own_annotations.items()
+ }
+ else:
+ own_annotations = {
+ n: typing._type_check(tp, msg)
+ for n, tp in own_annotations.items()
+ }
+ required_keys = set()
+ optional_keys = set()
+ readonly_keys = set()
+ mutable_keys = set()
+ extra_items_type = None
+
+ for base in bases:
+ base_dict = base.__dict__
+
+ annotations.update(base_dict.get('__annotations__', {}))
+ required_keys.update(base_dict.get('__required_keys__', ()))
+ optional_keys.update(base_dict.get('__optional_keys__', ()))
+ readonly_keys.update(base_dict.get('__readonly_keys__', ()))
+ mutable_keys.update(base_dict.get('__mutable_keys__', ()))
+ base_extra_items_type = base_dict.get('__extra_items__', None)
+ if base_extra_items_type is not None:
+ extra_items_type = base_extra_items_type
+
+ if closed and extra_items_type is None:
+ extra_items_type = Never
+ if closed and "__extra_items__" in own_annotations:
+ annotation_type = own_annotations.pop("__extra_items__")
+ qualifiers = set(_get_typeddict_qualifiers(annotation_type))
+ if Required in qualifiers:
+ raise TypeError(
+ "Special key __extra_items__ does not support "
+ "Required"
+ )
+ if NotRequired in qualifiers:
+ raise TypeError(
+ "Special key __extra_items__ does not support "
+ "NotRequired"
+ )
+ extra_items_type = annotation_type
+
+ annotations.update(own_annotations)
+ for annotation_key, annotation_type in own_annotations.items():
+ qualifiers = set(_get_typeddict_qualifiers(annotation_type))
+
+ if Required in qualifiers:
+ required_keys.add(annotation_key)
+ elif NotRequired in qualifiers:
+ optional_keys.add(annotation_key)
+ elif total:
+ required_keys.add(annotation_key)
+ else:
+ optional_keys.add(annotation_key)
+ if ReadOnly in qualifiers:
+ mutable_keys.discard(annotation_key)
+ readonly_keys.add(annotation_key)
+ else:
+ mutable_keys.add(annotation_key)
+ readonly_keys.discard(annotation_key)
+
+ tp_dict.__annotations__ = annotations
+ tp_dict.__required_keys__ = frozenset(required_keys)
+ tp_dict.__optional_keys__ = frozenset(optional_keys)
+ tp_dict.__readonly_keys__ = frozenset(readonly_keys)
+ tp_dict.__mutable_keys__ = frozenset(mutable_keys)
+ if not hasattr(tp_dict, '__total__'):
+ tp_dict.__total__ = total
+ tp_dict.__closed__ = closed
+ tp_dict.__extra_items__ = extra_items_type
+ return tp_dict
+
+ __call__ = dict # static method
+
+ def __subclasscheck__(cls, other):
+ # Typed dicts are only for static structural subtyping.
+ raise TypeError('TypedDict does not support instance and class checks')
+
+ __instancecheck__ = __subclasscheck__
+
+ _TypedDict = type.__new__(_TypedDictMeta, 'TypedDict', (), {})
+
+ @_ensure_subclassable(lambda bases: (_TypedDict,))
+ def TypedDict(typename, fields=_marker, /, *, total=True, closed=False, **kwargs):
+ """A simple typed namespace. At runtime it is equivalent to a plain dict.
+
+ TypedDict creates a dictionary type such that a type checker will expect all
+ instances to have a certain set of keys, where each key is
+ associated with a value of a consistent type. This expectation
+ is not checked at runtime.
+
+ Usage::
+
+ class Point2D(TypedDict):
+ x: int
+ y: int
+ label: str
+
+ a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
+ b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
+
+ assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
+
+ The type info can be accessed via the Point2D.__annotations__ dict, and
+ the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
+ TypedDict supports an additional equivalent form::
+
+ Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
+
+ By default, all keys must be present in a TypedDict. It is possible
+ to override this by specifying totality::
+
+ class Point2D(TypedDict, total=False):
+ x: int
+ y: int
+
+ This means that a Point2D TypedDict can have any of the keys omitted. A type
+ checker is only expected to support a literal False or True as the value of
+ the total argument. True is the default, and makes all items defined in the
+ class body be required.
+
+ The Required and NotRequired special forms can also be used to mark
+ individual keys as being required or not required::
+
+ class Point2D(TypedDict):
+ x: int # the "x" key must always be present (Required is the default)
+ y: NotRequired[int] # the "y" key can be omitted
+
+ See PEP 655 for more details on Required and NotRequired.
+ """
+ if fields is _marker or fields is None:
+ if fields is _marker:
+ deprecated_thing = "Failing to pass a value for the 'fields' parameter"
+ else:
+ deprecated_thing = "Passing `None` as the 'fields' parameter"
+
+ example = f"`{typename} = TypedDict({typename!r}, {{}})`"
+ deprecation_msg = (
+ f"{deprecated_thing} is deprecated and will be disallowed in "
+ "Python 3.15. To create a TypedDict class with 0 fields "
+ "using the functional syntax, pass an empty dictionary, e.g. "
+ ) + example + "."
+ warnings.warn(deprecation_msg, DeprecationWarning, stacklevel=2)
+ if closed is not False and closed is not True:
+ kwargs["closed"] = closed
+ closed = False
+ fields = kwargs
+ elif kwargs:
+ raise TypeError("TypedDict takes either a dict or keyword arguments,"
+ " but not both")
+ if kwargs:
+ if sys.version_info >= (3, 13):
+ raise TypeError("TypedDict takes no keyword arguments")
+ warnings.warn(
+ "The kwargs-based syntax for TypedDict definitions is deprecated "
+ "in Python 3.11, will be removed in Python 3.13, and may not be "
+ "understood by third-party type checkers.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ ns = {'__annotations__': dict(fields)}
+ module = _caller()
+ if module is not None:
+ # Setting correct module is necessary to make typed dict classes pickleable.
+ ns['__module__'] = module
+
+ td = _TypedDictMeta(typename, (), ns, total=total, closed=closed)
+ td.__orig_bases__ = (TypedDict,)
+ return td
+
+ if hasattr(typing, "_TypedDictMeta"):
+ _TYPEDDICT_TYPES = (typing._TypedDictMeta, _TypedDictMeta)
+ else:
+ _TYPEDDICT_TYPES = (_TypedDictMeta,)
+
+ def is_typeddict(tp):
+ """Check if an annotation is a TypedDict class
+
+ For example::
+ class Film(TypedDict):
+ title: str
+ year: int
+
+ is_typeddict(Film) # => True
+ is_typeddict(Union[list, str]) # => False
+ """
+ # On 3.8, this would otherwise return True
+ if hasattr(typing, "TypedDict") and tp is typing.TypedDict:
+ return False
+ return isinstance(tp, _TYPEDDICT_TYPES)
+
+
+if hasattr(typing, "assert_type"):
+ assert_type = typing.assert_type
+
+else:
+ def assert_type(val, typ, /):
+ """Assert (to the type checker) that the value is of the given type.
+
+ When the type checker encounters a call to assert_type(), it
+ emits an error if the value is not of the specified type::
+
+ def greet(name: str) -> None:
+ assert_type(name, str) # ok
+ assert_type(name, int) # type checker error
+
+ At runtime this returns the first argument unchanged and otherwise
+ does nothing.
+ """
+ return val
+
+
+if hasattr(typing, "ReadOnly"): # 3.13+
+ get_type_hints = typing.get_type_hints
+else: # <=3.13
+ # replaces _strip_annotations()
+ def _strip_extras(t):
+ """Strips Annotated, Required and NotRequired from a given type."""
+ if isinstance(t, _AnnotatedAlias):
+ return _strip_extras(t.__origin__)
+ if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired, ReadOnly):
+ return _strip_extras(t.__args__[0])
+ if isinstance(t, typing._GenericAlias):
+ stripped_args = tuple(_strip_extras(a) for a in t.__args__)
+ if stripped_args == t.__args__:
+ return t
+ return t.copy_with(stripped_args)
+ if hasattr(_types, "GenericAlias") and isinstance(t, _types.GenericAlias):
+ stripped_args = tuple(_strip_extras(a) for a in t.__args__)
+ if stripped_args == t.__args__:
+ return t
+ return _types.GenericAlias(t.__origin__, stripped_args)
+ if hasattr(_types, "UnionType") and isinstance(t, _types.UnionType):
+ stripped_args = tuple(_strip_extras(a) for a in t.__args__)
+ if stripped_args == t.__args__:
+ return t
+ return functools.reduce(operator.or_, stripped_args)
+
+ return t
+
+ def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
+ """Return type hints for an object.
+
+ This is often the same as obj.__annotations__, but it handles
+ forward references encoded as string literals, adds Optional[t] if a
+ default value equal to None is set and recursively replaces all
+ 'Annotated[T, ...]', 'Required[T]' or 'NotRequired[T]' with 'T'
+ (unless 'include_extras=True').
+
+ The argument may be a module, class, method, or function. The annotations
+ are returned as a dictionary. For classes, annotations include also
+ inherited members.
+
+ TypeError is raised if the argument is not of a type that can contain
+ annotations, and an empty dictionary is returned if no annotations are
+ present.
+
+ BEWARE -- the behavior of globalns and localns is counterintuitive
+ (unless you are familiar with how eval() and exec() work). The
+ search order is locals first, then globals.
+
+ - If no dict arguments are passed, an attempt is made to use the
+ globals from obj (or the respective module's globals for classes),
+ and these are also used as the locals. If the object does not appear
+ to have globals, an empty dictionary is used.
+
+ - If one dict argument is passed, it is used for both globals and
+ locals.
+
+ - If two dict arguments are passed, they specify globals and
+ locals, respectively.
+ """
+ if hasattr(typing, "Annotated"): # 3.9+
+ hint = typing.get_type_hints(
+ obj, globalns=globalns, localns=localns, include_extras=True
+ )
+ else: # 3.8
+ hint = typing.get_type_hints(obj, globalns=globalns, localns=localns)
+ if include_extras:
+ return hint
+ return {k: _strip_extras(t) for k, t in hint.items()}
+
+
+# Python 3.9+ has PEP 593 (Annotated)
+if hasattr(typing, 'Annotated'):
+ Annotated = typing.Annotated
+ # Not exported and not a public API, but needed for get_origin() and get_args()
+ # to work.
+ _AnnotatedAlias = typing._AnnotatedAlias
+# 3.8
+else:
+ class _AnnotatedAlias(typing._GenericAlias, _root=True):
+ """Runtime representation of an annotated type.
+
+ At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
+ with extra annotations. The alias behaves like a normal typing alias,
+ instantiating is the same as instantiating the underlying type, binding
+ it to types is also the same.
+ """
+ def __init__(self, origin, metadata):
+ if isinstance(origin, _AnnotatedAlias):
+ metadata = origin.__metadata__ + metadata
+ origin = origin.__origin__
+ super().__init__(origin, origin)
+ self.__metadata__ = metadata
+
+ def copy_with(self, params):
+ assert len(params) == 1
+ new_type = params[0]
+ return _AnnotatedAlias(new_type, self.__metadata__)
+
+ def __repr__(self):
+ return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, "
+ f"{', '.join(repr(a) for a in self.__metadata__)}]")
+
+ def __reduce__(self):
+ return operator.getitem, (
+ Annotated, (self.__origin__,) + self.__metadata__
+ )
+
+ def __eq__(self, other):
+ if not isinstance(other, _AnnotatedAlias):
+ return NotImplemented
+ if self.__origin__ != other.__origin__:
+ return False
+ return self.__metadata__ == other.__metadata__
+
+ def __hash__(self):
+ return hash((self.__origin__, self.__metadata__))
+
+ class Annotated:
+ """Add context specific metadata to a type.
+
+ Example: Annotated[int, runtime_check.Unsigned] indicates to the
+ hypothetical runtime_check module that this type is an unsigned int.
+ Every other consumer of this type can ignore this metadata and treat
+ this type as int.
+
+ The first argument to Annotated must be a valid type (and will be in
+ the __origin__ field), the remaining arguments are kept as a tuple in
+ the __metadata__ field.
+
+ Details:
+
+ - It's an error to call `Annotated` with less than two arguments.
+ - Nested Annotated are flattened::
+
+ Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
+
+ - Instantiating an annotated type is equivalent to instantiating the
+ underlying type::
+
+ Annotated[C, Ann1](5) == C(5)
+
+ - Annotated can be used as a generic type alias::
+
+ Optimized = Annotated[T, runtime.Optimize()]
+ Optimized[int] == Annotated[int, runtime.Optimize()]
+
+ OptimizedList = Annotated[List[T], runtime.Optimize()]
+ OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
+ """
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwargs):
+ raise TypeError("Type Annotated cannot be instantiated.")
+
+ @typing._tp_cache
+ def __class_getitem__(cls, params):
+ if not isinstance(params, tuple) or len(params) < 2:
+ raise TypeError("Annotated[...] should be used "
+ "with at least two arguments (a type and an "
+ "annotation).")
+ allowed_special_forms = (ClassVar, Final)
+ if get_origin(params[0]) in allowed_special_forms:
+ origin = params[0]
+ else:
+ msg = "Annotated[t, ...]: t must be a type."
+ origin = typing._type_check(params[0], msg)
+ metadata = tuple(params[1:])
+ return _AnnotatedAlias(origin, metadata)
+
+ def __init_subclass__(cls, *args, **kwargs):
+ raise TypeError(
+ f"Cannot subclass {cls.__module__}.Annotated"
+ )
+
+# Python 3.8 has get_origin() and get_args() but those implementations aren't
+# Annotated-aware, so we can't use those. Python 3.9's versions don't support
+# ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do.
+if sys.version_info[:2] >= (3, 10):
+ get_origin = typing.get_origin
+ get_args = typing.get_args
+# 3.8-3.9
+else:
+ try:
+ # 3.9+
+ from typing import _BaseGenericAlias
+ except ImportError:
+ _BaseGenericAlias = typing._GenericAlias
+ try:
+ # 3.9+
+ from typing import GenericAlias as _typing_GenericAlias
+ except ImportError:
+ _typing_GenericAlias = typing._GenericAlias
+
+ def get_origin(tp):
+ """Get the unsubscripted version of a type.
+
+ This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
+ and Annotated. Return None for unsupported types. Examples::
+
+ get_origin(Literal[42]) is Literal
+ get_origin(int) is None
+ get_origin(ClassVar[int]) is ClassVar
+ get_origin(Generic) is Generic
+ get_origin(Generic[T]) is Generic
+ get_origin(Union[T, int]) is Union
+ get_origin(List[Tuple[T, T]][int]) == list
+ get_origin(P.args) is P
+ """
+ if isinstance(tp, _AnnotatedAlias):
+ return Annotated
+ if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias, _BaseGenericAlias,
+ ParamSpecArgs, ParamSpecKwargs)):
+ return tp.__origin__
+ if tp is typing.Generic:
+ return typing.Generic
+ return None
+
+ def get_args(tp):
+ """Get type arguments with all substitutions performed.
+
+ For unions, basic simplifications used by Union constructor are performed.
+ Examples::
+ get_args(Dict[str, int]) == (str, int)
+ get_args(int) == ()
+ get_args(Union[int, Union[T, int], str][int]) == (int, str)
+ get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
+ get_args(Callable[[], T][int]) == ([], int)
+ """
+ if isinstance(tp, _AnnotatedAlias):
+ return (tp.__origin__,) + tp.__metadata__
+ if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias)):
+ if getattr(tp, "_special", False):
+ return ()
+ res = tp.__args__
+ if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
+ res = (list(res[:-1]), res[-1])
+ return res
+ return ()
+
+
+# 3.10+
+if hasattr(typing, 'TypeAlias'):
+ TypeAlias = typing.TypeAlias
+# 3.9
+elif sys.version_info[:2] >= (3, 9):
+ @_ExtensionsSpecialForm
+ def TypeAlias(self, parameters):
+ """Special marker indicating that an assignment should
+ be recognized as a proper type alias definition by type
+ checkers.
+
+ For example::
+
+ Predicate: TypeAlias = Callable[..., bool]
+
+ It's invalid when used anywhere except as in the example above.
+ """
+ raise TypeError(f"{self} is not subscriptable")
+# 3.8
+else:
+ TypeAlias = _ExtensionsSpecialForm(
+ 'TypeAlias',
+ doc="""Special marker indicating that an assignment should
+ be recognized as a proper type alias definition by type
+ checkers.
+
+ For example::
+
+ Predicate: TypeAlias = Callable[..., bool]
+
+ It's invalid when used anywhere except as in the example
+ above."""
+ )
+
+
+def _set_default(type_param, default):
+ if isinstance(default, (tuple, list)):
+ type_param.__default__ = tuple((typing._type_check(d, "Default must be a type")
+ for d in default))
+ elif default != _marker:
+ if isinstance(type_param, ParamSpec) and default is ...: # ... not valid <3.11
+ type_param.__default__ = default
+ else:
+ type_param.__default__ = typing._type_check(default, "Default must be a type")
+ else:
+ type_param.__default__ = None
+
+
+def _set_module(typevarlike):
+ # for pickling:
+ def_mod = _caller(depth=3)
+ if def_mod != 'typing_extensions':
+ typevarlike.__module__ = def_mod
+
+
+class _DefaultMixin:
+ """Mixin for TypeVarLike defaults."""
+
+ __slots__ = ()
+ __init__ = _set_default
+
+
+# Classes using this metaclass must provide a _backported_typevarlike ClassVar
+class _TypeVarLikeMeta(type):
+ def __instancecheck__(cls, __instance: Any) -> bool:
+ return isinstance(__instance, cls._backported_typevarlike)
+
+
+# Add default and infer_variance parameters from PEP 696 and 695
+class TypeVar(metaclass=_TypeVarLikeMeta):
+ """Type variable."""
+
+ _backported_typevarlike = typing.TypeVar
+
+ def __new__(cls, name, *constraints, bound=None,
+ covariant=False, contravariant=False,
+ default=_marker, infer_variance=False):
+ if hasattr(typing, "TypeAliasType"):
+ # PEP 695 implemented (3.12+), can pass infer_variance to typing.TypeVar
+ typevar = typing.TypeVar(name, *constraints, bound=bound,
+ covariant=covariant, contravariant=contravariant,
+ infer_variance=infer_variance)
+ else:
+ typevar = typing.TypeVar(name, *constraints, bound=bound,
+ covariant=covariant, contravariant=contravariant)
+ if infer_variance and (covariant or contravariant):
+ raise ValueError("Variance cannot be specified with infer_variance.")
+ typevar.__infer_variance__ = infer_variance
+ _set_default(typevar, default)
+ _set_module(typevar)
+ return typevar
+
+ def __init_subclass__(cls) -> None:
+ raise TypeError(f"type '{__name__}.TypeVar' is not an acceptable base type")
+
+
+# Python 3.10+ has PEP 612
+if hasattr(typing, 'ParamSpecArgs'):
+ ParamSpecArgs = typing.ParamSpecArgs
+ ParamSpecKwargs = typing.ParamSpecKwargs
+# 3.8-3.9
+else:
+ class _Immutable:
+ """Mixin to indicate that object should not be copied."""
+ __slots__ = ()
+
+ def __copy__(self):
+ return self
+
+ def __deepcopy__(self, memo):
+ return self
+
+ class ParamSpecArgs(_Immutable):
+ """The args for a ParamSpec object.
+
+ Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
+
+ ParamSpecArgs objects have a reference back to their ParamSpec:
+
+ P.args.__origin__ is P
+
+ This type is meant for runtime introspection and has no special meaning to
+ static type checkers.
+ """
+ def __init__(self, origin):
+ self.__origin__ = origin
+
+ def __repr__(self):
+ return f"{self.__origin__.__name__}.args"
+
+ def __eq__(self, other):
+ if not isinstance(other, ParamSpecArgs):
+ return NotImplemented
+ return self.__origin__ == other.__origin__
+
+ class ParamSpecKwargs(_Immutable):
+ """The kwargs for a ParamSpec object.
+
+ Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
+
+ ParamSpecKwargs objects have a reference back to their ParamSpec:
+
+ P.kwargs.__origin__ is P
+
+ This type is meant for runtime introspection and has no special meaning to
+ static type checkers.
+ """
+ def __init__(self, origin):
+ self.__origin__ = origin
+
+ def __repr__(self):
+ return f"{self.__origin__.__name__}.kwargs"
+
+ def __eq__(self, other):
+ if not isinstance(other, ParamSpecKwargs):
+ return NotImplemented
+ return self.__origin__ == other.__origin__
+
+# 3.10+
+if hasattr(typing, 'ParamSpec'):
+
+ # Add default parameter - PEP 696
+ class ParamSpec(metaclass=_TypeVarLikeMeta):
+ """Parameter specification."""
+
+ _backported_typevarlike = typing.ParamSpec
+
+ def __new__(cls, name, *, bound=None,
+ covariant=False, contravariant=False,
+ infer_variance=False, default=_marker):
+ if hasattr(typing, "TypeAliasType"):
+ # PEP 695 implemented, can pass infer_variance to typing.TypeVar
+ paramspec = typing.ParamSpec(name, bound=bound,
+ covariant=covariant,
+ contravariant=contravariant,
+ infer_variance=infer_variance)
+ else:
+ paramspec = typing.ParamSpec(name, bound=bound,
+ covariant=covariant,
+ contravariant=contravariant)
+ paramspec.__infer_variance__ = infer_variance
+
+ _set_default(paramspec, default)
+ _set_module(paramspec)
+ return paramspec
+
+ def __init_subclass__(cls) -> None:
+ raise TypeError(f"type '{__name__}.ParamSpec' is not an acceptable base type")
+
+# 3.8-3.9
+else:
+
+ # Inherits from list as a workaround for Callable checks in Python < 3.9.2.
+ class ParamSpec(list, _DefaultMixin):
+ """Parameter specification variable.
+
+ Usage::
+
+ P = ParamSpec('P')
+
+ Parameter specification variables exist primarily for the benefit of static
+ type checkers. They are used to forward the parameter types of one
+ callable to another callable, a pattern commonly found in higher order
+ functions and decorators. They are only valid when used in ``Concatenate``,
+ or as the first argument to ``Callable``. In Python 3.10 and higher,
+ they are also supported in user-defined Generics at runtime.
+ See class Generic for more information on generic types. An
+ example for annotating a decorator::
+
+ T = TypeVar('T')
+ P = ParamSpec('P')
+
+ def add_logging(f: Callable[P, T]) -> Callable[P, T]:
+ '''A type-safe decorator to add logging to a function.'''
+ def inner(*args: P.args, **kwargs: P.kwargs) -> T:
+ logging.info(f'{f.__name__} was called')
+ return f(*args, **kwargs)
+ return inner
+
+ @add_logging
+ def add_two(x: float, y: float) -> float:
+ '''Add two numbers together.'''
+ return x + y
+
+ Parameter specification variables defined with covariant=True or
+ contravariant=True can be used to declare covariant or contravariant
+ generic types. These keyword arguments are valid, but their actual semantics
+ are yet to be decided. See PEP 612 for details.
+
+ Parameter specification variables can be introspected. e.g.:
+
+ P.__name__ == 'P'
+ P.__bound__ == None
+ P.__covariant__ == False
+ P.__contravariant__ == False
+
+ Note that only parameter specification variables defined in global scope can
+ be pickled.
+ """
+
+ # Trick Generic __parameters__.
+ __class__ = typing.TypeVar
+
+ @property
+ def args(self):
+ return ParamSpecArgs(self)
+
+ @property
+ def kwargs(self):
+ return ParamSpecKwargs(self)
+
+ def __init__(self, name, *, bound=None, covariant=False, contravariant=False,
+ infer_variance=False, default=_marker):
+ super().__init__([self])
+ self.__name__ = name
+ self.__covariant__ = bool(covariant)
+ self.__contravariant__ = bool(contravariant)
+ self.__infer_variance__ = bool(infer_variance)
+ if bound:
+ self.__bound__ = typing._type_check(bound, 'Bound must be a type.')
+ else:
+ self.__bound__ = None
+ _DefaultMixin.__init__(self, default)
+
+ # for pickling:
+ def_mod = _caller()
+ if def_mod != 'typing_extensions':
+ self.__module__ = def_mod
+
+ def __repr__(self):
+ if self.__infer_variance__:
+ prefix = ''
+ elif self.__covariant__:
+ prefix = '+'
+ elif self.__contravariant__:
+ prefix = '-'
+ else:
+ prefix = '~'
+ return prefix + self.__name__
+
+ def __hash__(self):
+ return object.__hash__(self)
+
+ def __eq__(self, other):
+ return self is other
+
+ def __reduce__(self):
+ return self.__name__
+
+ # Hack to get typing._type_check to pass.
+ def __call__(self, *args, **kwargs):
+ pass
+
+
+# 3.8-3.9
+if not hasattr(typing, 'Concatenate'):
+ # Inherits from list as a workaround for Callable checks in Python < 3.9.2.
+ class _ConcatenateGenericAlias(list):
+
+ # Trick Generic into looking into this for __parameters__.
+ __class__ = typing._GenericAlias
+
+ # Flag in 3.8.
+ _special = False
+
+ def __init__(self, origin, args):
+ super().__init__(args)
+ self.__origin__ = origin
+ self.__args__ = args
+
+ def __repr__(self):
+ _type_repr = typing._type_repr
+ return (f'{_type_repr(self.__origin__)}'
+ f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]')
+
+ def __hash__(self):
+ return hash((self.__origin__, self.__args__))
+
+ # Hack to get typing._type_check to pass in Generic.
+ def __call__(self, *args, **kwargs):
+ pass
+
+ @property
+ def __parameters__(self):
+ return tuple(
+ tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec))
+ )
+
+
+# 3.8-3.9
+@typing._tp_cache
+def _concatenate_getitem(self, parameters):
+ if parameters == ():
+ raise TypeError("Cannot take a Concatenate of no types.")
+ if not isinstance(parameters, tuple):
+ parameters = (parameters,)
+ if not isinstance(parameters[-1], ParamSpec):
+ raise TypeError("The last parameter to Concatenate should be a "
+ "ParamSpec variable.")
+ msg = "Concatenate[arg, ...]: each arg must be a type."
+ parameters = tuple(typing._type_check(p, msg) for p in parameters)
+ return _ConcatenateGenericAlias(self, parameters)
+
+
+# 3.10+
+if hasattr(typing, 'Concatenate'):
+ Concatenate = typing.Concatenate
+ _ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa: F811
+# 3.9
+elif sys.version_info[:2] >= (3, 9):
+ @_ExtensionsSpecialForm
+ def Concatenate(self, parameters):
+ """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
+ higher order function which adds, removes or transforms parameters of a
+ callable.
+
+ For example::
+
+ Callable[Concatenate[int, P], int]
+
+ See PEP 612 for detailed information.
+ """
+ return _concatenate_getitem(self, parameters)
+# 3.8
+else:
+ class _ConcatenateForm(_ExtensionsSpecialForm, _root=True):
+ def __getitem__(self, parameters):
+ return _concatenate_getitem(self, parameters)
+
+ Concatenate = _ConcatenateForm(
+ 'Concatenate',
+ doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
+ higher order function which adds, removes or transforms parameters of a
+ callable.
+
+ For example::
+
+ Callable[Concatenate[int, P], int]
+
+ See PEP 612 for detailed information.
+ """)
+
+# 3.10+
+if hasattr(typing, 'TypeGuard'):
+ TypeGuard = typing.TypeGuard
+# 3.9
+elif sys.version_info[:2] >= (3, 9):
+ @_ExtensionsSpecialForm
+ def TypeGuard(self, parameters):
+ """Special typing form used to annotate the return type of a user-defined
+ type guard function. ``TypeGuard`` only accepts a single type argument.
+ At runtime, functions marked this way should return a boolean.
+
+ ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
+ type checkers to determine a more precise type of an expression within a
+ program's code flow. Usually type narrowing is done by analyzing
+ conditional code flow and applying the narrowing to a block of code. The
+ conditional expression here is sometimes referred to as a "type guard".
+
+ Sometimes it would be convenient to use a user-defined boolean function
+ as a type guard. Such a function should use ``TypeGuard[...]`` as its
+ return type to alert static type checkers to this intention.
+
+ Using ``-> TypeGuard`` tells the static type checker that for a given
+ function:
+
+ 1. The return value is a boolean.
+ 2. If the return value is ``True``, the type of its argument
+ is the type inside ``TypeGuard``.
+
+ For example::
+
+ def is_str(val: Union[str, float]):
+ # "isinstance" type guard
+ if isinstance(val, str):
+ # Type of ``val`` is narrowed to ``str``
+ ...
+ else:
+ # Else, type of ``val`` is narrowed to ``float``.
+ ...
+
+ Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
+ form of ``TypeA`` (it can even be a wider form) and this may lead to
+ type-unsafe results. The main reason is to allow for things like
+ narrowing ``List[object]`` to ``List[str]`` even though the latter is not
+ a subtype of the former, since ``List`` is invariant. The responsibility of
+ writing type-safe type guards is left to the user.
+
+ ``TypeGuard`` also works with type variables. For more information, see
+ PEP 647 (User-Defined Type Guards).
+ """
+ item = typing._type_check(parameters, f'{self} accepts only a single type.')
+ return typing._GenericAlias(self, (item,))
+# 3.8
+else:
+ class _TypeGuardForm(_ExtensionsSpecialForm, _root=True):
+ def __getitem__(self, parameters):
+ item = typing._type_check(parameters,
+ f'{self._name} accepts only a single type')
+ return typing._GenericAlias(self, (item,))
+
+ TypeGuard = _TypeGuardForm(
+ 'TypeGuard',
+ doc="""Special typing form used to annotate the return type of a user-defined
+ type guard function. ``TypeGuard`` only accepts a single type argument.
+ At runtime, functions marked this way should return a boolean.
+
+ ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
+ type checkers to determine a more precise type of an expression within a
+ program's code flow. Usually type narrowing is done by analyzing
+ conditional code flow and applying the narrowing to a block of code. The
+ conditional expression here is sometimes referred to as a "type guard".
+
+ Sometimes it would be convenient to use a user-defined boolean function
+ as a type guard. Such a function should use ``TypeGuard[...]`` as its
+ return type to alert static type checkers to this intention.
+
+ Using ``-> TypeGuard`` tells the static type checker that for a given
+ function:
+
+ 1. The return value is a boolean.
+ 2. If the return value is ``True``, the type of its argument
+ is the type inside ``TypeGuard``.
+
+ For example::
+
+ def is_str(val: Union[str, float]):
+ # "isinstance" type guard
+ if isinstance(val, str):
+ # Type of ``val`` is narrowed to ``str``
+ ...
+ else:
+ # Else, type of ``val`` is narrowed to ``float``.
+ ...
+
+ Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
+ form of ``TypeA`` (it can even be a wider form) and this may lead to
+ type-unsafe results. The main reason is to allow for things like
+ narrowing ``List[object]`` to ``List[str]`` even though the latter is not
+ a subtype of the former, since ``List`` is invariant. The responsibility of
+ writing type-safe type guards is left to the user.
+
+ ``TypeGuard`` also works with type variables. For more information, see
+ PEP 647 (User-Defined Type Guards).
+ """)
+
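+# Editorial illustration, not upstream code: the docstring example above omits
+# the return annotation, so this sketch shows the complete form of a
+# user-defined type guard. ``_is_str_list`` is a hypothetical helper.
+def _is_str_list(val: 'typing.List[object]') -> 'TypeGuard[typing.List[str]]':
+    # At runtime this is an ordinary bool; the ``TypeGuard[...]`` annotation is
+    # what lets a static checker narrow ``val`` to List[str] in the True branch.
+    return all(isinstance(x, str) for x in val)
+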
+# 3.13+
+if hasattr(typing, 'TypeIs'):
+ TypeIs = typing.TypeIs
+# 3.9
+elif sys.version_info[:2] >= (3, 9):
+ @_ExtensionsSpecialForm
+ def TypeIs(self, parameters):
+ """Special typing form used to annotate the return type of a user-defined
+ type narrower function. ``TypeIs`` only accepts a single type argument.
+ At runtime, functions marked this way should return a boolean.
+
+ ``TypeIs`` aims to benefit *type narrowing* -- a technique used by static
+ type checkers to determine a more precise type of an expression within a
+ program's code flow. Usually type narrowing is done by analyzing
+ conditional code flow and applying the narrowing to a block of code. The
+ conditional expression here is sometimes referred to as a "type guard".
+
+ Sometimes it would be convenient to use a user-defined boolean function
+ as a type guard. Such a function should use ``TypeIs[...]`` as its
+ return type to alert static type checkers to this intention.
+
+ Using ``-> TypeIs`` tells the static type checker that for a given
+ function:
+
+ 1. The return value is a boolean.
+ 2. If the return value is ``True``, the type of its argument
+ is the intersection of the type inside ``TypeIs`` and the argument's
+ previously known type.
+
+ For example::
+
+ def is_awaitable(val: object) -> TypeIs[Awaitable[Any]]:
+ return hasattr(val, '__await__')
+
+ def f(val: Union[int, Awaitable[int]]) -> int:
+ if is_awaitable(val):
+ assert_type(val, Awaitable[int])
+ else:
+ assert_type(val, int)
+
+ ``TypeIs`` also works with type variables. For more information, see
+ PEP 742 (Narrowing types with TypeIs).
+ """
+ item = typing._type_check(parameters, f'{self} accepts only a single type.')
+ return typing._GenericAlias(self, (item,))
+# 3.8
+else:
+ class _TypeIsForm(_ExtensionsSpecialForm, _root=True):
+ def __getitem__(self, parameters):
+ item = typing._type_check(parameters,
+ f'{self._name} accepts only a single type')
+ return typing._GenericAlias(self, (item,))
+
+ TypeIs = _TypeIsForm(
+ 'TypeIs',
+ doc="""Special typing form used to annotate the return type of a user-defined
+ type narrower function. ``TypeIs`` only accepts a single type argument.
+ At runtime, functions marked this way should return a boolean.
+
+ ``TypeIs`` aims to benefit *type narrowing* -- a technique used by static
+ type checkers to determine a more precise type of an expression within a
+ program's code flow. Usually type narrowing is done by analyzing
+ conditional code flow and applying the narrowing to a block of code. The
+ conditional expression here is sometimes referred to as a "type guard".
+
+ Sometimes it would be convenient to use a user-defined boolean function
+ as a type guard. Such a function should use ``TypeIs[...]`` as its
+ return type to alert static type checkers to this intention.
+
+ Using ``-> TypeIs`` tells the static type checker that for a given
+ function:
+
+ 1. The return value is a boolean.
+ 2. If the return value is ``True``, the type of its argument
+ is the intersection of the type inside ``TypeIs`` and the argument's
+ previously known type.
+
+ For example::
+
+ def is_awaitable(val: object) -> TypeIs[Awaitable[Any]]:
+ return hasattr(val, '__await__')
+
+ def f(val: Union[int, Awaitable[int]]) -> int:
+ if is_awaitable(val):
+ assert_type(val, Awaitable[int])
+ else:
+ assert_type(val, int)
+
+ ``TypeIs`` also works with type variables. For more information, see
+ PEP 742 (Narrowing types with TypeIs).
+ """)
+
+
+# Vendored from cpython typing._SpecialFrom
+class _SpecialForm(typing._Final, _root=True):
+ __slots__ = ('_name', '__doc__', '_getitem')
+
+ def __init__(self, getitem):
+ self._getitem = getitem
+ self._name = getitem.__name__
+ self.__doc__ = getitem.__doc__
+
+ def __getattr__(self, item):
+ if item in {'__name__', '__qualname__'}:
+ return self._name
+
+ raise AttributeError(item)
+
+ def __mro_entries__(self, bases):
+ raise TypeError(f"Cannot subclass {self!r}")
+
+ def __repr__(self):
+ return f'typing_extensions.{self._name}'
+
+ def __reduce__(self):
+ return self._name
+
+ def __call__(self, *args, **kwds):
+ raise TypeError(f"Cannot instantiate {self!r}")
+
+ def __or__(self, other):
+ return typing.Union[self, other]
+
+ def __ror__(self, other):
+ return typing.Union[other, self]
+
+ def __instancecheck__(self, obj):
+ raise TypeError(f"{self} cannot be used with isinstance()")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError(f"{self} cannot be used with issubclass()")
+
+ @typing._tp_cache
+ def __getitem__(self, parameters):
+ return self._getitem(self, parameters)
+
+
+if hasattr(typing, "LiteralString"): # 3.11+
+ LiteralString = typing.LiteralString
+else:
+ @_SpecialForm
+ def LiteralString(self, params):
+ """Represents an arbitrary literal string.
+
+ Example::
+
+ from typing_extensions import LiteralString
+
+ def query(sql: LiteralString) -> ...:
+ ...
+
+ query("SELECT * FROM table") # ok
+ query(f"SELECT * FROM {input()}") # not ok
+
+ See PEP 675 for details.
+
+ """
+ raise TypeError(f"{self} is not subscriptable")
+
+
+if hasattr(typing, "Self"): # 3.11+
+ Self = typing.Self
+else:
+ @_SpecialForm
+ def Self(self, params):
+ """Used to spell the type of "self" in classes.
+
+ Example::
+
+ from typing import Self
+
+ class ReturnsSelf:
+ def parse(self, data: bytes) -> Self:
+ ...
+ return self
+
+ """
+
+ raise TypeError(f"{self} is not subscriptable")
+
+
+if hasattr(typing, "Never"): # 3.11+
+ Never = typing.Never
+else:
+ @_SpecialForm
+ def Never(self, params):
+ """The bottom type, a type that has no members.
+
+ This can be used to define a function that should never be
+ called, or a function that never returns::
+
+ from typing_extensions import Never
+
+ def never_call_me(arg: Never) -> None:
+ pass
+
+ def int_or_str(arg: int | str) -> None:
+ never_call_me(arg) # type checker error
+ match arg:
+ case int():
+ print("It's an int")
+ case str():
+ print("It's a str")
+ case _:
+ never_call_me(arg) # ok, arg is of type Never
+
+ """
+
+ raise TypeError(f"{self} is not subscriptable")
+
+
+if hasattr(typing, 'Required'): # 3.11+
+ Required = typing.Required
+ NotRequired = typing.NotRequired
+elif sys.version_info[:2] >= (3, 9): # 3.9-3.10
+ @_ExtensionsSpecialForm
+ def Required(self, parameters):
+ """A special typing construct to mark a key of a total=False TypedDict
+ as required. For example:
+
+ class Movie(TypedDict, total=False):
+ title: Required[str]
+ year: int
+
+ m = Movie(
+ title='The Matrix', # typechecker error if key is omitted
+ year=1999,
+ )
+
+ There is no runtime checking that a required key is actually provided
+ when instantiating a related TypedDict.
+ """
+ item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
+ return typing._GenericAlias(self, (item,))
+
+ @_ExtensionsSpecialForm
+ def NotRequired(self, parameters):
+ """A special typing construct to mark a key of a TypedDict as
+ potentially missing. For example:
+
+ class Movie(TypedDict):
+ title: str
+ year: NotRequired[int]
+
+ m = Movie(
+ title='The Matrix', # typechecker error if key is omitted
+ year=1999,
+ )
+ """
+ item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
+ return typing._GenericAlias(self, (item,))
+
+else: # 3.8
+ class _RequiredForm(_ExtensionsSpecialForm, _root=True):
+ def __getitem__(self, parameters):
+ item = typing._type_check(parameters,
+ f'{self._name} accepts only a single type.')
+ return typing._GenericAlias(self, (item,))
+
+ Required = _RequiredForm(
+ 'Required',
+ doc="""A special typing construct to mark a key of a total=False TypedDict
+ as required. For example:
+
+ class Movie(TypedDict, total=False):
+ title: Required[str]
+ year: int
+
+ m = Movie(
+ title='The Matrix', # typechecker error if key is omitted
+ year=1999,
+ )
+
+ There is no runtime checking that a required key is actually provided
+ when instantiating a related TypedDict.
+ """)
+ NotRequired = _RequiredForm(
+ 'NotRequired',
+ doc="""A special typing construct to mark a key of a TypedDict as
+ potentially missing. For example:
+
+ class Movie(TypedDict):
+ title: str
+ year: NotRequired[int]
+
+ m = Movie(
+ title='The Matrix', # typechecker error if key is omitted
+ year=1999,
+ )
+ """)
+
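+# Editorial illustration, not upstream code: Required and NotRequired can be
+# mixed in a single TypedDict, overriding its ``total=`` setting per key.
+# ``_SketchMovie`` is a hypothetical class used only for this sketch.
+class _SketchMovie(TypedDict, total=False):
+    title: Required[str]     # must be supplied even though total=False
+    year: NotRequired[int]   # may be omitted (already the default here)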
+
+if hasattr(typing, 'ReadOnly'):
+ ReadOnly = typing.ReadOnly
+elif sys.version_info[:2] >= (3, 9): # 3.9-3.12
+ @_ExtensionsSpecialForm
+ def ReadOnly(self, parameters):
+ """A special typing construct to mark an item of a TypedDict as read-only.
+
+ For example:
+
+ class Movie(TypedDict):
+ title: ReadOnly[str]
+ year: int
+
+ def mutate_movie(m: Movie) -> None:
+ m["year"] = 1992 # allowed
+ m["title"] = "The Matrix" # typechecker error
+
+ There is no runtime checking for this property.
+ """
+ item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
+ return typing._GenericAlias(self, (item,))
+
+else: # 3.8
+ class _ReadOnlyForm(_ExtensionsSpecialForm, _root=True):
+ def __getitem__(self, parameters):
+ item = typing._type_check(parameters,
+ f'{self._name} accepts only a single type.')
+ return typing._GenericAlias(self, (item,))
+
+ ReadOnly = _ReadOnlyForm(
+ 'ReadOnly',
+ doc="""A special typing construct to mark a key of a TypedDict as read-only.
+
+ For example:
+
+ class Movie(TypedDict):
+ title: ReadOnly[str]
+ year: int
+
+ def mutate_movie(m: Movie) -> None:
+ m["year"] = 1992 # allowed
+ m["title"] = "The Matrix" # typechecker error
+
+ There is no runtime checking for this property.
+ """)
+
+
+_UNPACK_DOC = """\
+Type unpack operator.
+
+The type unpack operator takes the child types from some container type,
+such as `tuple[int, str]` or a `TypeVarTuple`, and 'pulls them out'. For
+example:
+
+ # For some generic class `Foo`:
+ Foo[Unpack[tuple[int, str]]] # Equivalent to Foo[int, str]
+
+ Ts = TypeVarTuple('Ts')
+ # Specifies that `Bar` is generic in an arbitrary number of types.
+ # (Think of `Ts` as a tuple of an arbitrary number of individual
+ # `TypeVar`s, which the `Unpack` is 'pulling out' directly into the
+ # `Generic[]`.)
+ class Bar(Generic[Unpack[Ts]]): ...
+ Bar[int] # Valid
+ Bar[int, str] # Also valid
+
+From Python 3.11, this can also be done using the `*` operator:
+
+ Foo[*tuple[int, str]]
+ class Bar(Generic[*Ts]): ...
+
+The operator can also be used along with a `TypedDict` to annotate
+`**kwargs` in a function signature. For instance:
+
+ class Movie(TypedDict):
+ name: str
+ year: int
+
+ # This function expects two keyword arguments - *name* of type `str` and
+ # *year* of type `int`.
+ def foo(**kwargs: Unpack[Movie]): ...
+
+Note that there is only some runtime checking of this operator. Not
+everything the runtime allows may be accepted by static type checkers.
+
+For more information, see PEP 646 and PEP 692.
+"""
+
+
+if sys.version_info >= (3, 12): # PEP 692 changed the repr of Unpack[]
+ Unpack = typing.Unpack
+
+ def _is_unpack(obj):
+ return get_origin(obj) is Unpack
+
+elif sys.version_info[:2] >= (3, 9): # 3.9+
+ class _UnpackSpecialForm(_ExtensionsSpecialForm, _root=True):
+ def __init__(self, getitem):
+ super().__init__(getitem)
+ self.__doc__ = _UNPACK_DOC
+
+ class _UnpackAlias(typing._GenericAlias, _root=True):
+ __class__ = typing.TypeVar
+
+ @_UnpackSpecialForm
+ def Unpack(self, parameters):
+ item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
+ return _UnpackAlias(self, (item,))
+
+ def _is_unpack(obj):
+ return isinstance(obj, _UnpackAlias)
+
+else: # 3.8
+ class _UnpackAlias(typing._GenericAlias, _root=True):
+ __class__ = typing.TypeVar
+
+ class _UnpackForm(_ExtensionsSpecialForm, _root=True):
+ def __getitem__(self, parameters):
+ item = typing._type_check(parameters,
+ f'{self._name} accepts only a single type.')
+ return _UnpackAlias(self, (item,))
+
+ Unpack = _UnpackForm('Unpack', doc=_UNPACK_DOC)
+
+ def _is_unpack(obj):
+ return isinstance(obj, _UnpackAlias)
+
+
+if hasattr(typing, "TypeVarTuple"): # 3.11+
+
+ # Add default parameter - PEP 696
+ class TypeVarTuple(metaclass=_TypeVarLikeMeta):
+ """Type variable tuple."""
+
+ _backported_typevarlike = typing.TypeVarTuple
+
+ def __new__(cls, name, *, default=_marker):
+ tvt = typing.TypeVarTuple(name)
+ _set_default(tvt, default)
+ _set_module(tvt)
+ return tvt
+
+ def __init_subclass__(self, *args, **kwds):
+ raise TypeError("Cannot subclass special typing classes")
+
+else: # <=3.10
+ class TypeVarTuple(_DefaultMixin):
+ """Type variable tuple.
+
+ Usage::
+
+ Ts = TypeVarTuple('Ts')
+
+ In the same way that a normal type variable is a stand-in for a single
+ type such as ``int``, a type variable *tuple* is a stand-in for a *tuple*
+ type such as ``Tuple[int, str]``.
+
+ Type variable tuples can be used in ``Generic`` declarations.
+ Consider the following example::
+
+ class Array(Generic[*Ts]): ...
+
+ The ``Ts`` type variable tuple here behaves like ``tuple[T1, T2]``,
+ where ``T1`` and ``T2`` are type variables. To use these type variables
+ as type parameters of ``Array``, we must *unpack* the type variable tuple using
+ the star operator: ``*Ts``. The signature of ``Array`` then behaves
+ as if we had simply written ``class Array(Generic[T1, T2]): ...``.
+ In contrast to ``Generic[T1, T2]``, however, ``Generic[*Ts]`` allows
+ us to parameterise the class with an *arbitrary* number of type parameters.
+
+ Type variable tuples can be used anywhere a normal ``TypeVar`` can.
+ This includes class definitions, as shown above, as well as function
+ signatures and variable annotations::
+
+ class Array(Generic[*Ts]):
+
+ def __init__(self, shape: Tuple[*Ts]):
+ self._shape: Tuple[*Ts] = shape
+
+ def get_shape(self) -> Tuple[*Ts]:
+ return self._shape
+
+ shape = (Height(480), Width(640))
+ x: Array[Height, Width] = Array(shape)
+ y = abs(x) # Inferred type is Array[Height, Width]
+ z = x + x # ... is Array[Height, Width]
+ x.get_shape() # ... is tuple[Height, Width]
+
+ """
+
+ # Trick Generic __parameters__.
+ __class__ = typing.TypeVar
+
+ def __iter__(self):
+ yield self.__unpacked__
+
+ def __init__(self, name, *, default=_marker):
+ self.__name__ = name
+ _DefaultMixin.__init__(self, default)
+
+ # for pickling:
+ def_mod = _caller()
+ if def_mod != 'typing_extensions':
+ self.__module__ = def_mod
+
+ self.__unpacked__ = Unpack[self]
+
+ def __repr__(self):
+ return self.__name__
+
+ def __hash__(self):
+ return object.__hash__(self)
+
+ def __eq__(self, other):
+ return self is other
+
+ def __reduce__(self):
+ return self.__name__
+
+ def __init_subclass__(self, *args, **kwds):
+ if '_root' not in kwds:
+ raise TypeError("Cannot subclass special typing classes")
+
+
+if hasattr(typing, "reveal_type"): # 3.11+
+ reveal_type = typing.reveal_type
+else: # <=3.10
+ def reveal_type(obj: T, /) -> T:
+ """Reveal the inferred type of a variable.
+
+ When a static type checker encounters a call to ``reveal_type()``,
+ it will emit the inferred type of the argument::
+
+ x: int = 1
+ reveal_type(x)
+
+ Running a static type checker (e.g., ``mypy``) on this example
+ will produce output similar to 'Revealed type is "builtins.int"'.
+
+ At runtime, the function prints the runtime type of the
+ argument and returns it unchanged.
+
+ """
+ print(f"Runtime type is {type(obj).__name__!r}", file=sys.stderr)
+ return obj
+
+
+if hasattr(typing, "assert_never"): # 3.11+
+ assert_never = typing.assert_never
+else: # <=3.10
+ def assert_never(arg: Never, /) -> Never:
+ """Assert to the type checker that a line of code is unreachable.
+
+ Example::
+
+ def int_or_str(arg: int | str) -> None:
+ match arg:
+ case int():
+ print("It's an int")
+ case str():
+ print("It's a str")
+ case _:
+ assert_never(arg)
+
+ If a type checker finds that a call to assert_never() is
+ reachable, it will emit an error.
+
+ At runtime, this throws an exception when called.
+
+ """
+ raise AssertionError("Expected code to be unreachable")
+
+
+if sys.version_info >= (3, 12): # 3.12+
+ # dataclass_transform exists in 3.11 but lacks the frozen_default parameter
+ dataclass_transform = typing.dataclass_transform
+else: # <=3.11
+ def dataclass_transform(
+ *,
+ eq_default: bool = True,
+ order_default: bool = False,
+ kw_only_default: bool = False,
+ frozen_default: bool = False,
+ field_specifiers: typing.Tuple[
+ typing.Union[typing.Type[typing.Any], typing.Callable[..., typing.Any]],
+ ...
+ ] = (),
+ **kwargs: typing.Any,
+ ) -> typing.Callable[[T], T]:
+ """Decorator that marks a function, class, or metaclass as providing
+ dataclass-like behavior.
+
+ Example:
+
+ from typing_extensions import dataclass_transform
+
+ _T = TypeVar("_T")
+
+ # Used on a decorator function
+ @dataclass_transform()
+ def create_model(cls: type[_T]) -> type[_T]:
+ ...
+ return cls
+
+ @create_model
+ class CustomerModel:
+ id: int
+ name: str
+
+ # Used on a base class
+ @dataclass_transform()
+ class ModelBase: ...
+
+ class CustomerModel(ModelBase):
+ id: int
+ name: str
+
+ # Used on a metaclass
+ @dataclass_transform()
+ class ModelMeta(type): ...
+
+ class ModelBase(metaclass=ModelMeta): ...
+
+ class CustomerModel(ModelBase):
+ id: int
+ name: str
+
+ Each of the ``CustomerModel`` classes defined in this example will now
+ behave similarly to a dataclass created with the ``@dataclasses.dataclass``
+ decorator. For example, the type checker will synthesize an ``__init__``
+ method.
+
+ The arguments to this decorator can be used to customize this behavior:
+ - ``eq_default`` indicates whether the ``eq`` parameter is assumed to be
+ True or False if it is omitted by the caller.
+ - ``order_default`` indicates whether the ``order`` parameter is
+ assumed to be True or False if it is omitted by the caller.
+ - ``kw_only_default`` indicates whether the ``kw_only`` parameter is
+ assumed to be True or False if it is omitted by the caller.
+ - ``frozen_default`` indicates whether the ``frozen`` parameter is
+ assumed to be True or False if it is omitted by the caller.
+ - ``field_specifiers`` specifies a static list of supported classes
+ or functions that describe fields, similar to ``dataclasses.field()``.
+
+ At runtime, this decorator records its arguments in the
+ ``__dataclass_transform__`` attribute on the decorated object.
+
+ See PEP 681 for details.
+
+ """
+ def decorator(cls_or_fn):
+ cls_or_fn.__dataclass_transform__ = {
+ "eq_default": eq_default,
+ "order_default": order_default,
+ "kw_only_default": kw_only_default,
+ "frozen_default": frozen_default,
+ "field_specifiers": field_specifiers,
+ "kwargs": kwargs,
+ }
+ return cls_or_fn
+ return decorator
+
+
+if hasattr(typing, "override"): # 3.12+
+ override = typing.override
+else: # <=3.11
+ _F = typing.TypeVar("_F", bound=typing.Callable[..., typing.Any])
+
+ def override(arg: _F, /) -> _F:
+ """Indicate that a method is intended to override a method in a base class.
+
+ Usage:
+
+ class Base:
+ def method(self) -> None:
+ pass
+
+ class Child(Base):
+ @override
+ def method(self) -> None:
+ super().method()
+
+ When this decorator is applied to a method, the type checker will
+ validate that it overrides a method with the same name on a base class.
+ This helps prevent bugs that may occur when a base class is changed
+ without an equivalent change to a child class.
+
+ There is no runtime checking of these properties. The decorator
+ sets the ``__override__`` attribute to ``True`` on the decorated object
+ to allow runtime introspection.
+
+ See PEP 698 for details.
+
+ """
+ try:
+ arg.__override__ = True
+ except (AttributeError, TypeError):
+ # Skip the attribute silently if it is not writable.
+ # AttributeError happens if the object has __slots__ or a
+ # read-only property, TypeError if it's a builtin class.
+ pass
+ return arg
+
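+# Editorial illustration, not upstream code: ``override`` changes no behaviour;
+# its only runtime footprint is the ``__override__`` marker, which tools may
+# introspect. Nothing below executes at import time; ``_SketchBase`` and
+# ``_SketchChild`` are hypothetical names.
+def _override_runtime_sketch():
+    class _SketchBase:
+        def ping(self):
+            return "base"
+
+    class _SketchChild(_SketchBase):
+        @override
+        def ping(self):
+            return "child"
+
+    assert getattr(_SketchChild.ping, "__override__", False) is True
+    return _SketchChild().ping()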
+
+if hasattr(warnings, "deprecated"):
+ deprecated = warnings.deprecated
+else:
+ _T = typing.TypeVar("_T")
+
+ class deprecated:
+ """Indicate that a class, function or overload is deprecated.
+
+ When this decorator is applied to an object, the type checker
+ will generate a diagnostic on usage of the deprecated object.
+
+ Usage:
+
+ @deprecated("Use B instead")
+ class A:
+ pass
+
+ @deprecated("Use g instead")
+ def f():
+ pass
+
+ @overload
+ @deprecated("int support is deprecated")
+ def g(x: int) -> int: ...
+ @overload
+ def g(x: str) -> int: ...
+
+ The warning specified by *category* will be emitted at runtime
+ on use of deprecated objects. For functions, that happens on calls;
+ for classes, on instantiation and on creation of subclasses.
+ If the *category* is ``None``, no warning is emitted at runtime.
+ The *stacklevel* determines where the
+ warning is emitted. If it is ``1`` (the default), the warning
+ is emitted at the direct caller of the deprecated object; if it
+ is higher, it is emitted further up the stack.
+ Static type checker behavior is not affected by the *category*
+ and *stacklevel* arguments.
+
+ The deprecation message passed to the decorator is saved in the
+ ``__deprecated__`` attribute on the decorated object.
+ If applied to an overload, the decorator
+ must be after the ``@overload`` decorator for the attribute to
+ exist on the overload as returned by ``get_overloads()``.
+
+ See PEP 702 for details.
+
+ """
+ def __init__(
+ self,
+ message: str,
+ /,
+ *,
+ category: typing.Optional[typing.Type[Warning]] = DeprecationWarning,
+ stacklevel: int = 1,
+ ) -> None:
+ if not isinstance(message, str):
+ raise TypeError(
+ "Expected an object of type str for 'message', not "
+ f"{type(message).__name__!r}"
+ )
+ self.message = message
+ self.category = category
+ self.stacklevel = stacklevel
+
+ def __call__(self, arg: _T, /) -> _T:
+ # Make sure the inner functions created below don't
+ # retain a reference to self.
+ msg = self.message
+ category = self.category
+ stacklevel = self.stacklevel
+ if category is None:
+ arg.__deprecated__ = msg
+ return arg
+ elif isinstance(arg, type):
+ import functools
+ from types import MethodType
+
+ original_new = arg.__new__
+
+ @functools.wraps(original_new)
+ def __new__(cls, *args, **kwargs):
+ if cls is arg:
+ warnings.warn(msg, category=category, stacklevel=stacklevel + 1)
+ if original_new is not object.__new__:
+ return original_new(cls, *args, **kwargs)
+ # Mirrors a similar check in object.__new__.
+ elif cls.__init__ is object.__init__ and (args or kwargs):
+ raise TypeError(f"{cls.__name__}() takes no arguments")
+ else:
+ return original_new(cls)
+
+ arg.__new__ = staticmethod(__new__)
+
+ original_init_subclass = arg.__init_subclass__
+ # We need slightly different behavior if __init_subclass__
+ # is a bound method (likely if it was implemented in Python)
+ if isinstance(original_init_subclass, MethodType):
+ original_init_subclass = original_init_subclass.__func__
+
+ @functools.wraps(original_init_subclass)
+ def __init_subclass__(*args, **kwargs):
+ warnings.warn(msg, category=category, stacklevel=stacklevel + 1)
+ return original_init_subclass(*args, **kwargs)
+
+ arg.__init_subclass__ = classmethod(__init_subclass__)
+ # Or otherwise, which likely means it's a builtin such as
+ # object's implementation of __init_subclass__.
+ else:
+ @functools.wraps(original_init_subclass)
+ def __init_subclass__(*args, **kwargs):
+ warnings.warn(msg, category=category, stacklevel=stacklevel + 1)
+ return original_init_subclass(*args, **kwargs)
+
+ arg.__init_subclass__ = __init_subclass__
+
+ arg.__deprecated__ = __new__.__deprecated__ = msg
+ __init_subclass__.__deprecated__ = msg
+ return arg
+ elif callable(arg):
+ import functools
+
+ @functools.wraps(arg)
+ def wrapper(*args, **kwargs):
+ warnings.warn(msg, category=category, stacklevel=stacklevel + 1)
+ return arg(*args, **kwargs)
+
+ arg.__deprecated__ = wrapper.__deprecated__ = msg
+ return wrapper
+ else:
+ raise TypeError(
+ "@deprecated decorator with non-None category must be applied to "
+ f"a class or callable, not {arg!r}"
+ )
+
+
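+# Editorial illustration, not upstream code: the runtime half of ``deprecated``.
+# Nothing below executes at import time; ``_sketch_legacy`` is a hypothetical
+# function.
+def _deprecated_runtime_sketch():
+    @deprecated("Use something newer", category=FutureWarning)
+    def _sketch_legacy():
+        return 1
+
+    # The message is recorded on the returned wrapper...
+    assert _sketch_legacy.__deprecated__ == "Use something newer"
+    # ...and the chosen warning category is emitted on every call.
+    with warnings.catch_warnings(record=True) as caught:
+        warnings.simplefilter("always")
+        result = _sketch_legacy()
+    assert result == 1
+    assert caught and issubclass(caught[0].category, FutureWarning)
+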
+# We have to do some monkey patching to deal with the dual nature of
+# Unpack/TypeVarTuple:
+# - We want Unpack to be a kind of TypeVar so it gets accepted in
+# Generic[Unpack[Ts]]
+# - We want it to *not* be treated as a TypeVar for the purposes of
+# counting generic parameters, so that when we subscript a generic,
+# the runtime doesn't try to substitute the Unpack with the subscripted type.
+if not hasattr(typing, "TypeVarTuple"):
+ def _check_generic(cls, parameters, elen=_marker):
+ """Check correct count for parameters of a generic cls (internal helper).
+
+ This gives a nice error message in case of count mismatch.
+ """
+ if not elen:
+ raise TypeError(f"{cls} is not a generic class")
+ if elen is _marker:
+ if not hasattr(cls, "__parameters__") or not cls.__parameters__:
+ raise TypeError(f"{cls} is not a generic class")
+ elen = len(cls.__parameters__)
+ alen = len(parameters)
+ if alen != elen:
+ expect_val = elen
+ if hasattr(cls, "__parameters__"):
+ parameters = [p for p in cls.__parameters__ if not _is_unpack(p)]
+ num_tv_tuples = sum(isinstance(p, TypeVarTuple) for p in parameters)
+ if (num_tv_tuples > 0) and (alen >= elen - num_tv_tuples):
+ return
+
+ # deal with TypeVarLike defaults
+ # required TypeVarLikes cannot appear after a defaulted one.
+ if alen < elen:
+ # since we validate TypeVarLike default in _collect_type_vars
+ # or _collect_parameters we can safely check parameters[alen]
+ if getattr(parameters[alen], '__default__', None) is not None:
+ return
+
+ num_default_tv = sum(getattr(p, '__default__', None)
+ is not None for p in parameters)
+
+ elen -= num_default_tv
+
+ expect_val = f"at least {elen}"
+
+ things = "arguments" if sys.version_info >= (3, 10) else "parameters"
+ raise TypeError(f"Too {'many' if alen > elen else 'few'} {things}"
+ f" for {cls}; actual {alen}, expected {expect_val}")
+else:
+ # Python 3.11+
+
+ def _check_generic(cls, parameters, elen):
+ """Check correct count for parameters of a generic cls (internal helper).
+
+ This gives a nice error message in case of count mismatch.
+ """
+ if not elen:
+ raise TypeError(f"{cls} is not a generic class")
+ alen = len(parameters)
+ if alen != elen:
+ expect_val = elen
+ if hasattr(cls, "__parameters__"):
+ parameters = [p for p in cls.__parameters__ if not _is_unpack(p)]
+
+ # deal with TypeVarLike defaults
+ # required TypeVarLikes cannot appear after a defaulted one.
+ if alen < elen:
+ # since we validate TypeVarLike default in _collect_type_vars
+ # or _collect_parameters we can safely check parameters[alen]
+ if getattr(parameters[alen], '__default__', None) is not None:
+ return
+
+ num_default_tv = sum(getattr(p, '__default__', None)
+ is not None for p in parameters)
+
+ elen -= num_default_tv
+
+ expect_val = f"at least {elen}"
+
+ raise TypeError(f"Too {'many' if alen > elen else 'few'} arguments"
+ f" for {cls}; actual {alen}, expected {expect_val}")
+
+typing._check_generic = _check_generic
+
+# Python 3.11+ _collect_type_vars was renamed to _collect_parameters
+if hasattr(typing, '_collect_type_vars'):
+ def _collect_type_vars(types, typevar_types=None):
+ """Collect all type variable contained in types in order of
+ first appearance (lexicographic order). For example::
+
+ _collect_type_vars((T, List[S, T])) == (T, S)
+ """
+ if typevar_types is None:
+ typevar_types = typing.TypeVar
+ tvars = []
+ # required TypeVarLike cannot appear after TypeVarLike with default
+ default_encountered = False
+ for t in types:
+ if (
+ isinstance(t, typevar_types) and
+ t not in tvars and
+ not _is_unpack(t)
+ ):
+ if getattr(t, '__default__', None) is not None:
+ default_encountered = True
+ elif default_encountered:
+ raise TypeError(f'Type parameter {t!r} without a default'
+ ' follows type parameter with a default')
+
+ tvars.append(t)
+ if _should_collect_from_parameters(t):
+ tvars.extend([t for t in t.__parameters__ if t not in tvars])
+ return tuple(tvars)
+
+ typing._collect_type_vars = _collect_type_vars
+else:
+ def _collect_parameters(args):
+ """Collect all type variables and parameter specifications in args
+ in order of first appearance (lexicographic order).
+
+ For example::
+
+ assert _collect_parameters((T, Callable[P, T])) == (T, P)
+ """
+ parameters = []
+ # required TypeVarLike cannot appear after TypeVarLike with default
+ default_encountered = False
+ for t in args:
+ if isinstance(t, type):
+ # We don't want __parameters__ descriptor of a bare Python class.
+ pass
+ elif isinstance(t, tuple):
+ # `t` might be a tuple, when `ParamSpec` is substituted with
+ # `[T, int]`, or `[int, *Ts]`, etc.
+ for x in t:
+ for collected in _collect_parameters([x]):
+ if collected not in parameters:
+ parameters.append(collected)
+ elif hasattr(t, '__typing_subst__'):
+ if t not in parameters:
+ if getattr(t, '__default__', None) is not None:
+ default_encountered = True
+ elif default_encountered:
+ raise TypeError(f'Type parameter {t!r} without a default'
+ ' follows type parameter with a default')
+
+ parameters.append(t)
+ else:
+ for x in getattr(t, '__parameters__', ()):
+ if x not in parameters:
+ parameters.append(x)
+
+ return tuple(parameters)
+
+ typing._collect_parameters = _collect_parameters
+
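+# Editorial illustration, not upstream code: what the patched helpers above
+# enable on Python <= 3.10 -- a Generic parameterised by a TypeVarTuple
+# accepts any number of type arguments. Nothing below executes at import time;
+# ``_SketchTs`` and ``_SketchArray`` are hypothetical names.
+def _unpack_patch_sketch():
+    _SketchTs = TypeVarTuple("_SketchTs")
+
+    class _SketchArray(typing.Generic[Unpack[_SketchTs]]):
+        pass
+
+    # Both subscriptions are accepted, which is exactly what the relaxed
+    # _check_generic above allows.
+    return _SketchArray[int], _SketchArray[int, str]
+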
+# Backport typing.NamedTuple as it exists in Python 3.13.
+# In 3.11, the ability to define generic `NamedTuple`s was added.
+# This was explicitly disallowed in 3.9-3.10, and only half-worked in <=3.8.
+# On 3.12, we added __orig_bases__ to call-based NamedTuples
+# On 3.13, we deprecated kwargs-based NamedTuples
+if sys.version_info >= (3, 13):
+ NamedTuple = typing.NamedTuple
+else:
+ def _make_nmtuple(name, types, module, defaults=()):
+ fields = [n for n, t in types]
+ annotations = {n: typing._type_check(t, f"field {n} annotation must be a type")
+ for n, t in types}
+ nm_tpl = collections.namedtuple(name, fields,
+ defaults=defaults, module=module)
+ nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = annotations
+ # The `_field_types` attribute was removed in 3.9;
+ # in earlier versions, it is the same as the `__annotations__` attribute
+ if sys.version_info < (3, 9):
+ nm_tpl._field_types = annotations
+ return nm_tpl
+
+ _prohibited_namedtuple_fields = typing._prohibited
+ _special_namedtuple_fields = frozenset({'__module__', '__name__', '__annotations__'})
+
+ class _NamedTupleMeta(type):
+ def __new__(cls, typename, bases, ns):
+ assert _NamedTuple in bases
+ for base in bases:
+ if base is not _NamedTuple and base is not typing.Generic:
+ raise TypeError(
+ 'can only inherit from a NamedTuple type and Generic')
+ bases = tuple(tuple if base is _NamedTuple else base for base in bases)
+ types = ns.get('__annotations__', {})
+ default_names = []
+ for field_name in types:
+ if field_name in ns:
+ default_names.append(field_name)
+ elif default_names:
+ raise TypeError(f"Non-default namedtuple field {field_name} "
+ f"cannot follow default field"
+ f"{'s' if len(default_names) > 1 else ''} "
+ f"{', '.join(default_names)}")
+ nm_tpl = _make_nmtuple(
+ typename, types.items(),
+ defaults=[ns[n] for n in default_names],
+ module=ns['__module__']
+ )
+ nm_tpl.__bases__ = bases
+ if typing.Generic in bases:
+ if hasattr(typing, '_generic_class_getitem'): # 3.12+
+ nm_tpl.__class_getitem__ = classmethod(typing._generic_class_getitem)
+ else:
+ class_getitem = typing.Generic.__class_getitem__.__func__
+ nm_tpl.__class_getitem__ = classmethod(class_getitem)
+ # update from user namespace without overriding special namedtuple attributes
+ for key, val in ns.items():
+ if key in _prohibited_namedtuple_fields:
+ raise AttributeError("Cannot overwrite NamedTuple attribute " + key)
+ elif key not in _special_namedtuple_fields:
+ if key not in nm_tpl._fields:
+ setattr(nm_tpl, key, ns[key])
+ try:
+ set_name = type(val).__set_name__
+ except AttributeError:
+ pass
+ else:
+ try:
+ set_name(val, nm_tpl, key)
+ except BaseException as e:
+ msg = (
+ f"Error calling __set_name__ on {type(val).__name__!r} "
+ f"instance {key!r} in {typename!r}"
+ )
+ # BaseException.add_note() existed on py311,
+ # but the __set_name__ machinery didn't start
+ # using add_note() until py312.
+ # Making sure exceptions are raised in the same way
+ # as in "normal" classes seems most important here.
+ if sys.version_info >= (3, 12):
+ e.add_note(msg)
+ raise
+ else:
+ raise RuntimeError(msg) from e
+
+ if typing.Generic in bases:
+ nm_tpl.__init_subclass__()
+ return nm_tpl
+
+ _NamedTuple = type.__new__(_NamedTupleMeta, 'NamedTuple', (), {})
+
+ def _namedtuple_mro_entries(bases):
+ assert NamedTuple in bases
+ return (_NamedTuple,)
+
+ @_ensure_subclassable(_namedtuple_mro_entries)
+ def NamedTuple(typename, fields=_marker, /, **kwargs):
+ """Typed version of namedtuple.
+
+ Usage::
+
+ class Employee(NamedTuple):
+ name: str
+ id: int
+
+ This is equivalent to::
+
+ Employee = collections.namedtuple('Employee', ['name', 'id'])
+
+ The resulting class has an extra __annotations__ attribute, giving a
+ dict that maps field names to types. (The field names are also in
+ the _fields attribute, which is part of the namedtuple API.)
+ An alternative equivalent functional syntax is also accepted::
+
+ Employee = NamedTuple('Employee', [('name', str), ('id', int)])
+ """
+ if fields is _marker:
+ if kwargs:
+ deprecated_thing = "Creating NamedTuple classes using keyword arguments"
+ deprecation_msg = (
+ "{name} is deprecated and will be disallowed in Python {remove}. "
+ "Use the class-based or functional syntax instead."
+ )
+ else:
+ deprecated_thing = "Failing to pass a value for the 'fields' parameter"
+ example = f"`{typename} = NamedTuple({typename!r}, [])`"
+ deprecation_msg = (
+ "{name} is deprecated and will be disallowed in Python {remove}. "
+ "To create a NamedTuple class with 0 fields "
+ "using the functional syntax, "
+ "pass an empty list, e.g. "
+ ) + example + "."
+ elif fields is None:
+ if kwargs:
+ raise TypeError(
+ "Cannot pass `None` as the 'fields' parameter "
+ "and also specify fields using keyword arguments"
+ )
+ else:
+ deprecated_thing = "Passing `None` as the 'fields' parameter"
+ example = f"`{typename} = NamedTuple({typename!r}, [])`"
+ deprecation_msg = (
+ "{name} is deprecated and will be disallowed in Python {remove}. "
+ "To create a NamedTuple class with 0 fields "
+ "using the functional syntax, "
+ "pass an empty list, e.g. "
+ ) + example + "."
+ elif kwargs:
+ raise TypeError("Either list of fields or keywords"
+ " can be provided to NamedTuple, not both")
+ if fields is _marker or fields is None:
+ warnings.warn(
+ deprecation_msg.format(name=deprecated_thing, remove="3.15"),
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ fields = kwargs.items()
+ nt = _make_nmtuple(typename, fields, module=_caller())
+ nt.__orig_bases__ = (NamedTuple,)
+ return nt
+
+
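+# Editorial illustration, not upstream code: one feature this backport provides
+# on older Pythons is generic NamedTuple classes. Nothing below executes at
+# import time; ``_SketchPair`` is a hypothetical name.
+def _generic_namedtuple_sketch():
+    class _SketchPair(NamedTuple, typing.Generic[T]):
+        first: T
+        second: T
+
+    return _SketchPair(first=1, second=2)
+
+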
+if hasattr(collections.abc, "Buffer"):
+ Buffer = collections.abc.Buffer
+else:
+ class Buffer(abc.ABC):
+ """Base class for classes that implement the buffer protocol.
+
+ The buffer protocol allows Python objects to expose a low-level
+ memory buffer interface. Before Python 3.12, it is not possible
+ to implement the buffer protocol in pure Python code, or even
+ to check whether a class implements the buffer protocol. In
+ Python 3.12 and higher, the ``__buffer__`` method allows access
+ to the buffer protocol from Python code, and the
+ ``collections.abc.Buffer`` ABC allows checking whether a class
+ implements the buffer protocol.
+
+ To indicate support for the buffer protocol in earlier versions,
+ inherit from this ABC, either in a stub file or at runtime,
+ or use ABC registration. This ABC provides no methods, because
+ there are no Python-accessible methods shared by pre-3.12 buffer
+ classes. It is useful primarily for static checks.
+
+ """
+
+ # As a courtesy, register the most common stdlib buffer classes.
+ Buffer.register(memoryview)
+ Buffer.register(bytearray)
+ Buffer.register(bytes)
+
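+# Editorial illustration, not upstream code: thanks to the registrations above
+# (or the stdlib ABC on 3.12+), common builtin buffer types already satisfy
+# isinstance checks against ``Buffer``. Nothing below executes at import time.
+def _buffer_abc_sketch():
+    assert isinstance(b"abc", Buffer)
+    assert isinstance(bytearray(b"abc"), Buffer)
+    assert isinstance(memoryview(b"abc"), Buffer)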
+
+# Backport of types.get_original_bases, available on 3.12+ in CPython
+if hasattr(_types, "get_original_bases"):
+ get_original_bases = _types.get_original_bases
+else:
+ def get_original_bases(cls, /):
+ """Return the class's "original" bases prior to modification by `__mro_entries__`.
+
+ Examples::
+
+ from typing import TypeVar, Generic
+ from typing_extensions import NamedTuple, TypedDict
+
+ T = TypeVar("T")
+ class Foo(Generic[T]): ...
+ class Bar(Foo[int], float): ...
+ class Baz(list[str]): ...
+ Eggs = NamedTuple("Eggs", [("a", int), ("b", str)])
+ Spam = TypedDict("Spam", {"a": int, "b": str})
+
+ assert get_original_bases(Bar) == (Foo[int], float)
+ assert get_original_bases(Baz) == (list[str],)
+ assert get_original_bases(Eggs) == (NamedTuple,)
+ assert get_original_bases(Spam) == (TypedDict,)
+ assert get_original_bases(int) == (object,)
+ """
+ try:
+ return cls.__dict__.get("__orig_bases__", cls.__bases__)
+ except AttributeError:
+ raise TypeError(
+ f'Expected an instance of type, not {type(cls).__name__!r}'
+ ) from None
+
+
+# NewType is a class on Python 3.10+, making it pickleable
+# The error message for subclassing instances of NewType was improved on 3.11+
+if sys.version_info >= (3, 11):
+ NewType = typing.NewType
+else:
+ class NewType:
+ """NewType creates simple unique types with almost zero
+ runtime overhead. NewType(name, tp) is considered a subtype of tp
+ by static type checkers. At runtime, NewType(name, tp) returns
+ a dummy callable that simply returns its argument. Usage::
+ UserId = NewType('UserId', int)
+ def name_by_id(user_id: UserId) -> str:
+ ...
+ UserId('user') # Fails type check
+ name_by_id(42) # Fails type check
+ name_by_id(UserId(42)) # OK
+ num = UserId(5) + 1 # type: int
+ """
+
+ def __call__(self, obj, /):
+ return obj
+
+ def __init__(self, name, tp):
+ self.__qualname__ = name
+ if '.' in name:
+ name = name.rpartition('.')[-1]
+ self.__name__ = name
+ self.__supertype__ = tp
+ def_mod = _caller()
+ if def_mod != 'typing_extensions':
+ self.__module__ = def_mod
+
+ def __mro_entries__(self, bases):
+ # We defined __mro_entries__ to get a better error message
+ # if a user attempts to subclass a NewType instance. bpo-46170
+ supercls_name = self.__name__
+
+ class Dummy:
+ def __init_subclass__(cls):
+ subcls_name = cls.__name__
+ raise TypeError(
+ f"Cannot subclass an instance of NewType. "
+ f"Perhaps you were looking for: "
+ f"`{subcls_name} = NewType({subcls_name!r}, {supercls_name})`"
+ )
+
+ return (Dummy,)
+
+ def __repr__(self):
+ return f'{self.__module__}.{self.__qualname__}'
+
+ def __reduce__(self):
+ return self.__qualname__
+
+ if sys.version_info >= (3, 10):
+ # PEP 604 methods
+ # It doesn't make sense to have these methods on Python <3.10
+
+ def __or__(self, other):
+ return typing.Union[self, other]
+
+ def __ror__(self, other):
+ return typing.Union[other, self]
+
+
+if hasattr(typing, "TypeAliasType"):
+ TypeAliasType = typing.TypeAliasType
+else:
+ def _is_unionable(obj):
+ """Corresponds to is_unionable() in unionobject.c in CPython."""
+ return obj is None or isinstance(obj, (
+ type,
+ _types.GenericAlias,
+ _types.UnionType,
+ TypeAliasType,
+ ))
+
+ class TypeAliasType:
+ """Create named, parameterized type aliases.
+
+ This provides a backport of the new `type` statement in Python 3.12:
+
+ type ListOrSet[T] = list[T] | set[T]
+
+ is equivalent to:
+
+ T = TypeVar("T")
+ ListOrSet = TypeAliasType("ListOrSet", list[T] | set[T], type_params=(T,))
+
+ The name ListOrSet can then be used as an alias for the type it refers to.
+
+ The type_params argument should contain all the type parameters used
+ in the value of the type alias. If the alias is not generic, this
+ argument is omitted.
+
+ Static type checkers should only support type aliases declared using
+ TypeAliasType that follow these rules:
+
+ - The first argument (the name) must be a string literal.
+ - The TypeAliasType instance must be immediately assigned to a variable
+ of the same name. (For example, 'X = TypeAliasType("Y", int)' is invalid,
+ as is 'X, Y = TypeAliasType("X", int), TypeAliasType("Y", int)').
+
+ """
+
+ def __init__(self, name: str, value, *, type_params=()):
+ if not isinstance(name, str):
+ raise TypeError("TypeAliasType name must be a string")
+ self.__value__ = value
+ self.__type_params__ = type_params
+
+ parameters = []
+ for type_param in type_params:
+ if isinstance(type_param, TypeVarTuple):
+ parameters.extend(type_param)
+ else:
+ parameters.append(type_param)
+ self.__parameters__ = tuple(parameters)
+ def_mod = _caller()
+ if def_mod != 'typing_extensions':
+ self.__module__ = def_mod
+ # Setting this attribute closes the TypeAliasType from further modification
+ self.__name__ = name
+
+ def __setattr__(self, name: str, value: object, /) -> None:
+ if hasattr(self, "__name__"):
+ self._raise_attribute_error(name)
+ super().__setattr__(name, value)
+
+ def __delattr__(self, name: str, /) -> Never:
+ self._raise_attribute_error(name)
+
+ def _raise_attribute_error(self, name: str) -> Never:
+ # Match the Python 3.12 error messages exactly
+ if name == "__name__":
+ raise AttributeError("readonly attribute")
+ elif name in {"__value__", "__type_params__", "__parameters__", "__module__"}:
+ raise AttributeError(
+ f"attribute '{name}' of 'typing.TypeAliasType' objects "
+ "is not writable"
+ )
+ else:
+ raise AttributeError(
+ f"'typing.TypeAliasType' object has no attribute '{name}'"
+ )
+
+ def __repr__(self) -> str:
+ return self.__name__
+
+ def __getitem__(self, parameters):
+ if not isinstance(parameters, tuple):
+ parameters = (parameters,)
+ parameters = [
+ typing._type_check(
+ item, f'Subscripting {self.__name__} requires a type.'
+ )
+ for item in parameters
+ ]
+ return typing._GenericAlias(self, tuple(parameters))
+
+ def __reduce__(self):
+ return self.__name__
+
+ def __init_subclass__(cls, *args, **kwargs):
+ raise TypeError(
+ "type 'typing_extensions.TypeAliasType' is not an acceptable base type"
+ )
+
+ # The presence of this method convinces typing._type_check
+ # that TypeAliasTypes are types.
+ def __call__(self):
+ raise TypeError("Type alias is not callable")
+
+ if sys.version_info >= (3, 10):
+ def __or__(self, right):
+ # For forward compatibility with 3.12, reject Unions
+ # that are not accepted by the built-in Union.
+ if not _is_unionable(right):
+ return NotImplemented
+ return typing.Union[self, right]
+
+ def __ror__(self, left):
+ if not _is_unionable(left):
+ return NotImplemented
+ return typing.Union[left, self]
+
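+# Editorial illustration, not upstream code: creating and inspecting an alias
+# with the backported TypeAliasType. Nothing below executes at import time;
+# ``_SketchAlias`` is a hypothetical name.
+def _type_alias_type_sketch():
+    _SketchAlias = TypeAliasType("_SketchAlias", typing.List[T], type_params=(T,))
+    # The alias keeps its definition and can be parameterised like a generic.
+    assert _SketchAlias.__value__ == typing.List[T]
+    assert _SketchAlias.__type_params__ == (T,)
+    return _SketchAlias[int]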
+
+if hasattr(typing, "is_protocol"):
+ is_protocol = typing.is_protocol
+ get_protocol_members = typing.get_protocol_members
+else:
+ def is_protocol(tp: type, /) -> bool:
+ """Return True if the given type is a Protocol.
+
+ Example::
+
+ >>> from typing_extensions import Protocol, is_protocol
+ >>> class P(Protocol):
+ ... def a(self) -> str: ...
+ ... b: int
+ >>> is_protocol(P)
+ True
+ >>> is_protocol(int)
+ False
+ """
+ return (
+ isinstance(tp, type)
+ and getattr(tp, '_is_protocol', False)
+ and tp is not Protocol
+ and tp is not typing.Protocol
+ )
+
+ def get_protocol_members(tp: type, /) -> typing.FrozenSet[str]:
+ """Return the set of members defined in a Protocol.
+
+ Example::
+
+ >>> from typing_extensions import Protocol, get_protocol_members
+ >>> class P(Protocol):
+ ... def a(self) -> str: ...
+ ... b: int
+ >>> get_protocol_members(P)
+ frozenset({'a', 'b'})
+
+ Raise a TypeError for arguments that are not Protocols.
+ """
+ if not is_protocol(tp):
+ raise TypeError(f'{tp!r} is not a Protocol')
+ if hasattr(tp, '__protocol_attrs__'):
+ return frozenset(tp.__protocol_attrs__)
+ return frozenset(_get_protocol_attrs(tp))
+
+
+if hasattr(typing, "Doc"):
+ Doc = typing.Doc
+else:
+ class Doc:
+ """Define the documentation of a type annotation using ``Annotated``, to be
+ used in class attributes, function and method parameters, return values,
+ and variables.
+
+ The value should be a positional-only string literal to allow static tools
+ like editors and documentation generators to use it.
+
+ This complements docstrings.
+
+ The string value passed is available in the attribute ``documentation``.
+
+ Example::
+
+ >>> from typing_extensions import Annotated, Doc
+ >>> def hi(to: Annotated[str, Doc("Who to say hi to")]) -> None: ...
+ """
+ def __init__(self, documentation: str, /) -> None:
+ self.documentation = documentation
+
+ def __repr__(self) -> str:
+ return f"Doc({self.documentation!r})"
+
+ def __hash__(self) -> int:
+ return hash(self.documentation)
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, Doc):
+ return NotImplemented
+ return self.documentation == other.documentation
+
+
+# Aliases for items that have always been in typing.
+# Explicitly assign these (rather than using `from typing import *` at the top),
+# so that we get a CI error if one of these is deleted from typing.py
+# in a future version of Python
+AbstractSet = typing.AbstractSet
+AnyStr = typing.AnyStr
+BinaryIO = typing.BinaryIO
+Callable = typing.Callable
+Collection = typing.Collection
+Container = typing.Container
+Dict = typing.Dict
+ForwardRef = typing.ForwardRef
+FrozenSet = typing.FrozenSet
+Generator = typing.Generator
+Generic = typing.Generic
+Hashable = typing.Hashable
+IO = typing.IO
+ItemsView = typing.ItemsView
+Iterable = typing.Iterable
+Iterator = typing.Iterator
+KeysView = typing.KeysView
+List = typing.List
+Mapping = typing.Mapping
+MappingView = typing.MappingView
+Match = typing.Match
+MutableMapping = typing.MutableMapping
+MutableSequence = typing.MutableSequence
+MutableSet = typing.MutableSet
+Optional = typing.Optional
+Pattern = typing.Pattern
+Reversible = typing.Reversible
+Sequence = typing.Sequence
+Set = typing.Set
+Sized = typing.Sized
+TextIO = typing.TextIO
+Tuple = typing.Tuple
+Union = typing.Union
+ValuesView = typing.ValuesView
+cast = typing.cast
+no_type_check = typing.no_type_check
+no_type_check_decorator = typing.no_type_check_decorator