applied-ai-018 committed on
Commit 9e86264 · verified · 1 Parent(s): 33a13ba

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. venv/lib/python3.10/site-packages/colorama/__pycache__/__init__.cpython-310.pyc +0 -0
  2. venv/lib/python3.10/site-packages/colorama/__pycache__/ansi.cpython-310.pyc +0 -0
  3. venv/lib/python3.10/site-packages/colorama/__pycache__/ansitowin32.cpython-310.pyc +0 -0
  4. venv/lib/python3.10/site-packages/colorama/__pycache__/initialise.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/colorama/__pycache__/win32.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/colorama/__pycache__/winterm.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/colorama/tests/__pycache__/initialise_test.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/colorama/tests/__pycache__/utils.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/evaluate/evaluator/__init__.py +140 -0
  10. venv/lib/python3.10/site-packages/evaluate/evaluator/audio_classification.py +151 -0
  11. venv/lib/python3.10/site-packages/evaluate/evaluator/automatic_speech_recognition.py +112 -0
  12. venv/lib/python3.10/site-packages/evaluate/evaluator/base.py +544 -0
  13. venv/lib/python3.10/site-packages/evaluate/evaluator/image_classification.py +119 -0
  14. venv/lib/python3.10/site-packages/evaluate/evaluator/question_answering.py +239 -0
  15. venv/lib/python3.10/site-packages/evaluate/evaluator/text2text_generation.py +267 -0
  16. venv/lib/python3.10/site-packages/evaluate/evaluator/text_generation.py +69 -0
  17. venv/lib/python3.10/site-packages/evaluate/evaluator/token_classification.py +278 -0
  18. venv/lib/python3.10/site-packages/evaluate/evaluator/utils.py +84 -0
  19. venv/lib/python3.10/site-packages/torch/_C.cpython-310-x86_64-linux-gnu.so +0 -0
  20. venv/lib/python3.10/site-packages/torch/_VF.py +30 -0
  21. venv/lib/python3.10/site-packages/torch/_VF.pyi +0 -0
  22. venv/lib/python3.10/site-packages/torch/__config__.py +22 -0
  23. venv/lib/python3.10/site-packages/torch/__future__.py +75 -0
  24. venv/lib/python3.10/site-packages/torch/__init__.py +2038 -0
  25. venv/lib/python3.10/site-packages/torch/_appdirs.py +666 -0
  26. venv/lib/python3.10/site-packages/torch/_classes.py +55 -0
  27. venv/lib/python3.10/site-packages/torch/_compile.py +30 -0
  28. venv/lib/python3.10/site-packages/torch/_custom_ops.py +322 -0
  29. venv/lib/python3.10/site-packages/torch/_deploy.py +105 -0
  30. venv/lib/python3.10/site-packages/torch/_guards.py +879 -0
  31. venv/lib/python3.10/site-packages/torch/_jit_internal.py +1510 -0
  32. venv/lib/python3.10/site-packages/torch/_lazy/__init__.py +55 -0
  33. venv/lib/python3.10/site-packages/torch/_lazy/__pycache__/__init__.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/torch/_lazy/closure.py +134 -0
  35. venv/lib/python3.10/site-packages/torch/_lazy/computation.py +26 -0
  36. venv/lib/python3.10/site-packages/torch/_lazy/config.py +16 -0
  37. venv/lib/python3.10/site-packages/torch/_lazy/debug.py +21 -0
  38. venv/lib/python3.10/site-packages/torch/_lazy/device_context.py +25 -0
  39. venv/lib/python3.10/site-packages/torch/_lazy/extract_compiled_graph.py +223 -0
  40. venv/lib/python3.10/site-packages/torch/_lazy/ir_cache.py +13 -0
  41. venv/lib/python3.10/site-packages/torch/_lazy/metrics.py +21 -0
  42. venv/lib/python3.10/site-packages/torch/_lazy/tensor_factory_functions.py +48 -0
  43. venv/lib/python3.10/site-packages/torch/_lazy/ts_backend.py +6 -0
  44. venv/lib/python3.10/site-packages/torch/_linalg_utils.py +164 -0
  45. venv/lib/python3.10/site-packages/torch/_lobpcg.py +1167 -0
  46. venv/lib/python3.10/site-packages/torch/_lowrank.py +298 -0
  47. venv/lib/python3.10/site-packages/torch/_meta_registrations.py +0 -0
  48. venv/lib/python3.10/site-packages/torch/_namedtensor_internals.py +157 -0
  49. venv/lib/python3.10/site-packages/torch/_ops.py +1037 -0
  50. venv/lib/python3.10/site-packages/torch/_python_dispatcher.py +181 -0
venv/lib/python3.10/site-packages/colorama/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (462 Bytes).
 
venv/lib/python3.10/site-packages/colorama/__pycache__/ansi.cpython-310.pyc ADDED
Binary file (2.99 kB).
 
venv/lib/python3.10/site-packages/colorama/__pycache__/ansitowin32.cpython-310.pyc ADDED
Binary file (8.5 kB).
 
venv/lib/python3.10/site-packages/colorama/__pycache__/initialise.cpython-310.pyc ADDED
Binary file (2.25 kB).
 
venv/lib/python3.10/site-packages/colorama/__pycache__/win32.cpython-310.pyc ADDED
Binary file (4.46 kB).
 
venv/lib/python3.10/site-packages/colorama/__pycache__/winterm.cpython-310.pyc ADDED
Binary file (5.15 kB).
 
venv/lib/python3.10/site-packages/colorama/tests/__pycache__/initialise_test.cpython-310.pyc ADDED
Binary file (6.88 kB).
 
venv/lib/python3.10/site-packages/colorama/tests/__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.58 kB).
 
venv/lib/python3.10/site-packages/evaluate/evaluator/__init__.py ADDED
@@ -0,0 +1,140 @@
+ # Copyright 2022 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ try:
+     from transformers.pipelines import SUPPORTED_TASKS as SUPPORTED_PIPELINE_TASKS
+     from transformers.pipelines import TASK_ALIASES
+     from transformers.pipelines import check_task as check_pipeline_task
+
+     TRANSFORMERS_AVAILABLE = True
+ except ImportError:
+     TRANSFORMERS_AVAILABLE = False
+
+ from typing import Dict, List
+
+ from .audio_classification import AudioClassificationEvaluator
+ from .automatic_speech_recognition import AutomaticSpeechRecognitionEvaluator
+ from .base import Evaluator
+ from .image_classification import ImageClassificationEvaluator
+ from .question_answering import QuestionAnsweringEvaluator
+ from .text2text_generation import SummarizationEvaluator, Text2TextGenerationEvaluator, TranslationEvaluator
+ from .text_classification import TextClassificationEvaluator
+ from .text_generation import TextGenerationEvaluator
+ from .token_classification import TokenClassificationEvaluator
+
+
+ SUPPORTED_EVALUATOR_TASKS = {
+     "text-classification": {
+         "implementation": TextClassificationEvaluator,
+         "default_metric_name": "accuracy",
+     },
+     "image-classification": {
+         "implementation": ImageClassificationEvaluator,
+         "default_metric_name": "accuracy",
+     },
+     "question-answering": {
+         "implementation": QuestionAnsweringEvaluator,
+         "default_metric_name": "squad",
+     },
+     "token-classification": {
+         "implementation": TokenClassificationEvaluator,
+         "default_metric_name": "seqeval",
+     },
+     "text-generation": {
+         "implementation": TextGenerationEvaluator,
+         "default_metric_name": "word_count",
+     },
+     "text2text-generation": {
+         "implementation": Text2TextGenerationEvaluator,
+         "default_metric_name": "bleu",
+     },
+     "summarization": {
+         "implementation": SummarizationEvaluator,
+         "default_metric_name": "rouge",
+     },
+     "translation": {
+         "implementation": TranslationEvaluator,
+         "default_metric_name": "bleu",
+     },
+     "automatic-speech-recognition": {
+         "implementation": AutomaticSpeechRecognitionEvaluator,
+         "default_metric_name": "wer",
+     },
+     "audio-classification": {
+         "implementation": AudioClassificationEvaluator,
+         "default_metric_name": "accuracy",
+     },
+ }
+
+
+ def get_supported_tasks() -> List[str]:
+     """
+     Returns a list of supported task strings.
+     """
+     return list(SUPPORTED_EVALUATOR_TASKS.keys())
+
+
+ def check_task(task: str) -> Dict:
+     """
+     Checks an incoming task string, to validate it's correct and returns the default Evaluator class and default metric
+     name. It first performs a check to validate that the string is a valid `Pipeline` task, then it checks if it's a
+     valid `Evaluator` task. `Evaluator` tasks are a subset of `Pipeline` tasks.
+     Args:
+         task (`str`):
+             The task defining which evaluator will be returned. Currently accepted tasks are:
+             - `"image-classification"`
+             - `"question-answering"`
+             - `"text-classification"` (alias `"sentiment-analysis"` available)
+             - `"token-classification"`
+     Returns:
+         task_defaults: `dict`, contains the implementation class of a given Evaluator and the default metric name.
+     """
+     if task in TASK_ALIASES:
+         task = TASK_ALIASES[task]
+     if not check_pipeline_task(task):
+         raise KeyError(f"Unknown task {task}, available tasks are: {get_supported_tasks()}.")
+     if task in SUPPORTED_EVALUATOR_TASKS.keys() and task in SUPPORTED_PIPELINE_TASKS.keys():
+         return SUPPORTED_EVALUATOR_TASKS[task]
+     raise KeyError(f"Unknown task {task}, available tasks are: {get_supported_tasks()}.")
+
+
+ def evaluator(task: str = None) -> Evaluator:
+     """
+     Utility factory method to build an [`Evaluator`].
+     Evaluators encapsulate a task and a default metric name. They leverage `pipeline` functionality from `transformers`
+     to simplify the evaluation of multiple combinations of models, datasets and metrics for a given task.
+     Args:
+         task (`str`):
+             The task defining which evaluator will be returned. Currently accepted tasks are:
+             - `"image-classification"`: will return a [`ImageClassificationEvaluator`].
+             - `"question-answering"`: will return a [`QuestionAnsweringEvaluator`].
+             - `"text-classification"` (alias `"sentiment-analysis"` available): will return a [`TextClassificationEvaluator`].
+             - `"token-classification"`: will return a [`TokenClassificationEvaluator`].
+     Returns:
+         [`Evaluator`]: An evaluator suitable for the task.
+     Examples:
+     ```python
+     >>> from evaluate import evaluator
+     >>> # Sentiment analysis evaluator
+     >>> evaluator("sentiment-analysis")
+     ```"""
+     if not TRANSFORMERS_AVAILABLE:
+         raise ImportError(
+             "If you want to use the `Evaluator` you need `transformers`. Run `pip install evaluate[transformers]`."
+         )
+     targeted_task = check_task(task)
+     evaluator_class = targeted_task["implementation"]
+     default_metric_name = targeted_task["default_metric_name"]
+     return evaluator_class(task=task, default_metric_name=default_metric_name)
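For reference, a minimal sketch of how this factory is typically used; it assumes `evaluate`, `transformers` and `datasets` are installed, and the model and dataset identifiers below are illustrative examples rather than anything pinned by this file:

    from datasets import load_dataset
    from evaluate import evaluator

    # Build the task-specific evaluator; "sentiment-analysis" would resolve to the
    # same class via TASK_ALIASES inside check_task().
    task_evaluator = evaluator("text-classification")
    data = load_dataset("rotten_tomatoes", split="test[:100]")
    results = task_evaluator.compute(
        model_or_pipeline="distilbert-base-uncased-finetuned-sst-2-english",
        data=data,
        label_mapping={"NEGATIVE": 0, "POSITIVE": 1},  # model labels -> dataset label ids
    )
    print(results)  # default "accuracy" metric plus the timing statistics added by the base class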
venv/lib/python3.10/site-packages/evaluate/evaluator/audio_classification.py ADDED
@@ -0,0 +1,151 @@
+ # Copyright 2022 The HuggingFace Evaluate Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from numbers import Number
+ from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union
+
+ from datasets import Dataset
+ from typing_extensions import Literal
+
+ from ..module import EvaluationModule
+ from ..utils.file_utils import add_end_docstrings, add_start_docstrings
+ from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator
+
+
+ if TYPE_CHECKING:
+     from transformers import FeatureExtractionMixin, Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel
+
+
+ TASK_DOCUMENTATION = r"""
+     Examples:
+
+     <Tip>
+
+     Remember that, in order to process audio files, you need ffmpeg installed (https://ffmpeg.org/download.html)
+
+     </Tip>
+
+     ```python
+     >>> from evaluate import evaluator
+     >>> from datasets import load_dataset
+
+     >>> task_evaluator = evaluator("audio-classification")
+     >>> data = load_dataset("superb", 'ks', split="test[:40]")
+     >>> results = task_evaluator.compute(
+     >>>     model_or_pipeline="superb/wav2vec2-base-superb-ks",
+     >>>     data=data,
+     >>>     label_column="label",
+     >>>     input_column="file",
+     >>>     metric="accuracy",
+     >>>     label_mapping={0: "yes", 1: "no", 2: "up", 3: "down"}
+     >>> )
+     ```
+
+     <Tip>
+
+     The evaluator supports raw audio data as well, in the form of a numpy array. However, be aware that calling
+     the audio column automatically decodes and resamples the audio files, which can be slow for large datasets.
+
+     </Tip>
+
+     ```python
+     >>> from evaluate import evaluator
+     >>> from datasets import load_dataset
+
+     >>> task_evaluator = evaluator("audio-classification")
+     >>> data = load_dataset("superb", 'ks', split="test[:40]")
+     >>> data = data.map(lambda example: {"audio": example["audio"]["array"]})
+     >>> results = task_evaluator.compute(
+     >>>     model_or_pipeline="superb/wav2vec2-base-superb-ks",
+     >>>     data=data,
+     >>>     label_column="label",
+     >>>     input_column="audio",
+     >>>     metric="accuracy",
+     >>>     label_mapping={0: "yes", 1: "no", 2: "up", 3: "down"}
+     >>> )
+     ```
+ """
+
+
+ class AudioClassificationEvaluator(Evaluator):
+     """
+     Audio classification evaluator.
+     This audio classification evaluator can currently be loaded from [`evaluator`] using the default task name
+     `audio-classification`.
+     Methods in this class assume a data format compatible with the [`transformers.AudioClassificationPipeline`].
+     """
+
+     PIPELINE_KWARGS = {}
+
+     def __init__(self, task="audio-classification", default_metric_name=None):
+         super().__init__(task, default_metric_name=default_metric_name)
+
+     def predictions_processor(self, predictions, label_mapping):
+         pred_label = [max(pred, key=lambda x: x["score"])["label"] for pred in predictions]
+         pred_label = [label_mapping[pred] if label_mapping is not None else pred for pred in pred_label]
+
+         return {"predictions": pred_label}
+
+     @add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
+     @add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
+     def compute(
+         self,
+         model_or_pipeline: Union[
+             str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"  # noqa: F821
+         ] = None,
+         data: Union[str, Dataset] = None,
+         subset: Optional[str] = None,
+         split: Optional[str] = None,
+         metric: Union[str, EvaluationModule] = None,
+         tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None,  # noqa: F821
+         feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None,  # noqa: F821
+         strategy: Literal["simple", "bootstrap"] = "simple",
+         confidence_level: float = 0.95,
+         n_resamples: int = 9999,
+         device: int = None,
+         random_state: Optional[int] = None,
+         input_column: str = "file",
+         label_column: str = "label",
+         label_mapping: Optional[Dict[str, Number]] = None,
+     ) -> Tuple[Dict[str, float], Any]:
+
+         """
+         input_column (`str`, defaults to `"file"`):
+             The name of the column containing either the audio files or a raw waveform, represented as a numpy array, in the dataset specified by `data`.
+         label_column (`str`, defaults to `"label"`):
+             The name of the column containing the labels in the dataset specified by `data`.
+         label_mapping (`Dict[str, Number]`, *optional*, defaults to `None`):
+             We want to map class labels defined by the model in the pipeline to values consistent with those
+             defined in the `label_column` of the `data` dataset.
+         """
+
+         result = super().compute(
+             model_or_pipeline=model_or_pipeline,
+             data=data,
+             subset=subset,
+             split=split,
+             metric=metric,
+             tokenizer=tokenizer,
+             feature_extractor=feature_extractor,
+             strategy=strategy,
+             confidence_level=confidence_level,
+             n_resamples=n_resamples,
+             device=device,
+             random_state=random_state,
+             input_column=input_column,
+             label_column=label_column,
+             label_mapping=label_mapping,
+         )
+
+         return result
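A standalone sketch of what `predictions_processor` does with raw pipeline output; the candidate labels and scores below are made-up values:

    # Each pipeline prediction is a list of {"label", "score"} candidates; the processor
    # keeps the top-scoring label and, if given, maps it through label_mapping.
    predictions = [
        [{"label": "yes", "score": 0.7}, {"label": "no", "score": 0.3}],
        [{"label": "no", "score": 0.9}, {"label": "yes", "score": 0.1}],
    ]
    label_mapping = {"yes": 0, "no": 1}
    pred_label = [max(pred, key=lambda x: x["score"])["label"] for pred in predictions]
    pred_label = [label_mapping[p] if label_mapping is not None else p for p in pred_label]
    print({"predictions": pred_label})  # {'predictions': [0, 1]}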
venv/lib/python3.10/site-packages/evaluate/evaluator/automatic_speech_recognition.py ADDED
@@ -0,0 +1,112 @@
+ # Copyright 2022 The HuggingFace Evaluate Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union
+
+ from datasets import Dataset
+ from typing_extensions import Literal
+
+ from ..module import EvaluationModule
+ from ..utils.file_utils import add_end_docstrings, add_start_docstrings
+ from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator
+
+
+ if TYPE_CHECKING:
+     from transformers import Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel
+
+
+ TASK_DOCUMENTATION = r"""
+     Examples:
+     ```python
+     >>> from evaluate import evaluator
+     >>> from datasets import load_dataset
+     >>> task_evaluator = evaluator("automatic-speech-recognition")
+     >>> data = load_dataset("mozilla-foundation/common_voice_11_0", "en", split="validation[:40]")
+     >>> results = task_evaluator.compute(
+     >>>     model_or_pipeline="https://huggingface.co/openai/whisper-tiny.en",
+     >>>     data=data,
+     >>>     input_column="path",
+     >>>     label_column="sentence",
+     >>>     metric="wer",
+     >>> )
+     ```
+ """
+
+
+ class AutomaticSpeechRecognitionEvaluator(Evaluator):
+     """
+     Automatic speech recognition evaluator.
+     This automatic speech recognition evaluator can currently be loaded from [`evaluator`] using the default task name
+     `automatic-speech-recognition`.
+     Methods in this class assume a data format compatible with the [`AutomaticSpeechRecognitionPipeline`].
+     """
+
+     PIPELINE_KWARGS = {"truncation": True}
+
+     def __init__(self, task="automatic-speech-recognition", default_metric_name=None):
+         super().__init__(task, default_metric_name=default_metric_name)
+
+     def predictions_processor(self, predictions, label_mapping):
+         return {"predictions": [pred["text"] for pred in predictions]}
+
+     @add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
+     @add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
+     def compute(
+         self,
+         model_or_pipeline: Union[
+             str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"  # noqa: F821
+         ] = None,
+         data: Union[str, Dataset] = None,
+         subset: Optional[str] = None,
+         split: Optional[str] = None,
+         metric: Union[str, EvaluationModule] = None,
+         tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None,  # noqa: F821
+         strategy: Literal["simple", "bootstrap"] = "simple",
+         confidence_level: float = 0.95,
+         n_resamples: int = 9999,
+         device: int = None,
+         random_state: Optional[int] = None,
+         input_column: str = "path",
+         label_column: str = "sentence",
+         generation_kwargs: dict = None,
+     ) -> Tuple[Dict[str, float], Any]:
+         """
+         input_column (`str`, defaults to `"path"`):
+             the name of the column containing the input audio path in the dataset specified by `data`.
+         label_column (`str`, defaults to `"sentence"`):
+             the name of the column containing the labels in the dataset specified by `data`.
+         generation_kwargs (`Dict`, *optional*, defaults to `None`):
+             The generation kwargs are passed to the pipeline and set the text generation strategy.
+         """
+
+         if generation_kwargs is not None:
+             self.PIPELINE_KWARGS.update(generation_kwargs)
+
+         result = super().compute(
+             model_or_pipeline=model_or_pipeline,
+             data=data,
+             subset=subset,
+             split=split,
+             metric=metric,
+             tokenizer=tokenizer,
+             strategy=strategy,
+             confidence_level=confidence_level,
+             n_resamples=n_resamples,
+             device=device,
+             random_state=random_state,
+             input_column=input_column,
+             label_column=label_column,
+         )
+
+         return result
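A hedged end-to-end sketch mirroring the docstring example above; Common Voice 11.0 is a gated dataset whose terms must be accepted first, and ffmpeg is required to decode the audio files:

    from datasets import load_dataset
    from evaluate import evaluator

    task_evaluator = evaluator("automatic-speech-recognition")
    data = load_dataset("mozilla-foundation/common_voice_11_0", "en", split="validation[:10]")
    results = task_evaluator.compute(
        model_or_pipeline="openai/whisper-tiny.en",
        data=data,
        input_column="path",      # audio file paths
        label_column="sentence",  # reference transcripts
        metric="wer",
    )
    print(results)  # word error rate plus the timing statistics from the base class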
venv/lib/python3.10/site-packages/evaluate/evaluator/base.py ADDED
@@ -0,0 +1,544 @@
1
+ # Copyright 2022 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from abc import ABC, abstractmethod
16
+ from numbers import Number
17
+ from typing import Any, Callable, Dict, List, Optional, Union
18
+
19
+ # Lint as: python3
20
+ from datasets import Dataset, load_dataset
21
+
22
+ from evaluate.evaluator.utils import choose_split
23
+
24
+
25
+ try:
26
+ from scipy.stats import bootstrap
27
+
28
+ SCIPY_AVAILABLE = True
29
+ except ImportError:
30
+ SCIPY_AVAILABLE = False
31
+
32
+ try:
33
+ import transformers
34
+ from transformers import Pipeline, pipeline
35
+
36
+ TRANSFORMERS_AVAILABLE = True
37
+ except ImportError:
38
+ TRANSFORMERS_AVAILABLE = False
39
+
40
+ from time import perf_counter
41
+
42
+ from typing_extensions import Literal
43
+
44
+ from ..loading import load
45
+ from ..module import EvaluationModule
46
+ from ..utils.logging import get_logger
47
+ from .utils import DatasetColumn
48
+
49
+
50
+ logger = get_logger(__name__)
51
+
52
+
53
+ EVALUTOR_COMPUTE_START_DOCSTRING = r"""
54
+ Compute the metric for a given pipeline and dataset combination.
55
+ Args:
56
+ model_or_pipeline (`str` or `Pipeline` or `Callable` or `PreTrainedModel` or `TFPreTrainedModel`, defaults to `None`):
57
+ If the argument is not specified, we initialize the default pipeline for the task (in this case
58
+ `text-classification` or its alias - `sentiment-analysis`). If the argument is of the type `str` or
59
+ is a model instance, we use it to initialize a new `Pipeline` with the given model. Otherwise we assume the
60
+ argument specifies a pre-initialized pipeline.
61
+ data (`str` or `Dataset`, defaults to `None`):
62
+ Specifies the dataset we will run evaluation on. If it is of type `str`, we treat it as the dataset
63
+ name, and load it. Otherwise we assume it represents a pre-loaded dataset.
64
+ subset (`str`, defaults to `None`):
65
+ Defines which dataset subset to load. If `None` is passed the default subset is loaded.
66
+ split (`str`, defaults to `None`):
67
+ Defines which dataset split to load. If `None` is passed, infers based on the `choose_split` function.
68
+ metric (`str` or `EvaluationModule`, defaults to `None`):
69
+ Specifies the metric we use in evaluator. If it is of type `str`, we treat it as the metric name, and
70
+ load it. Otherwise we assume it represents a pre-loaded metric.
71
+ tokenizer (`str` or `PreTrainedTokenizer`, *optional*, defaults to `None`):
72
+ Argument can be used to overwrite a default tokenizer if `model_or_pipeline` represents a model for
73
+ which we build a pipeline. If `model_or_pipeline` is `None` or a pre-initialized pipeline, we ignore
74
+ this argument.
75
+ strategy (`Literal["simple", "bootstrap"]`, defaults to "simple"):
76
+ specifies the evaluation strategy. Possible values are:
77
+ - `"simple"` - we evaluate the metric and return the scores.
78
+ - `"bootstrap"` - on top of computing the metric scores, we calculate the confidence interval for each
79
+ of the returned metric keys, using `scipy`'s `bootstrap` method
80
+ https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bootstrap.html.
81
+ confidence_level (`float`, defaults to `0.95`):
82
+ The `confidence_level` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
83
+ n_resamples (`int`, defaults to `9999`):
84
+ The `n_resamples` value passed to `bootstrap` if `"bootstrap"` strategy is chosen.
85
+ device (`int`, defaults to `None`):
86
+ Device ordinal for CPU/GPU support of the pipeline. Setting this to -1 will leverage CPU, a positive
87
+ integer will run the model on the associated CUDA device ID. If `None` is provided it will be inferred and
88
+ CUDA:0 used if available, CPU otherwise.
89
+ random_state (`int`, *optional*, defaults to `None`):
90
+ The `random_state` value passed to `bootstrap` if `"bootstrap"` strategy is chosen. Useful for
91
+ debugging.
92
+ """
93
+
94
+ EVALUATOR_COMPUTE_RETURN_DOCSTRING = r"""
95
+ Return:
96
+ A `Dict`. The keys represent metric keys calculated for the `metric` specified in function arguments. For the
97
+ `"simple"` strategy, the value is the metric score. For the `"bootstrap"` strategy, the value is a `Dict`
98
+ containing the score, the confidence interval and the standard error calculated for each metric key.
99
+ """
100
+
101
+
102
+ class Evaluator(ABC):
103
+ """
104
+ The [`Evaluator`] class is the class from which all evaluators inherit. Refer to this class for methods shared across
105
+ different evaluators.
106
+ Base class implementing evaluator operations.
107
+ """
108
+
109
+ PIPELINE_KWARGS = {}
110
+ METRIC_KWARGS = {}
111
+
112
+ def __init__(self, task: str, default_metric_name: str = None):
113
+ if not TRANSFORMERS_AVAILABLE:
114
+ raise ImportError(
115
+ "If you want to use the `Evaluator` you need `transformers`. Run `pip install evaluate[evaluator]`."
116
+ )
117
+ if not SCIPY_AVAILABLE:
118
+ raise ImportError(
119
+ "If you want to use the `Evaluator` you need `scipy>=1.7.1`. Run `pip install evaluate[evaluator]`."
120
+ )
121
+ self.task = task
122
+ self.default_metric_name = default_metric_name
123
+
124
+ @staticmethod
125
+ def _compute_confidence_interval(
126
+ metric,
127
+ metric_inputs,
128
+ metric_keys: List[str],
129
+ confidence_level: float = 0.95,
130
+ n_resamples: int = 9999,
131
+ random_state: Optional[int] = None,
132
+ ) -> Dict[str, Any]:
133
+ """
134
+ A utility function enabling the confidence interval calculation for metrics computed
135
+ by the evaluator based on `scipy`'s `bootstrap` method.
136
+ """
137
+
138
+ # bootstrap only works with functions that use args and no kwargs
139
+ def build_args_metric(metric, key, **kwargs):
140
+ def args_metric(*args):
141
+ return metric.compute(**{k: v for k, v in zip(kwargs.keys(), args)})[key]
142
+
143
+ return args_metric
144
+
145
+ bootstrap_dict = {}
146
+ for key in metric_keys:
147
+ bs = bootstrap(
148
+ data=list(metric_inputs.values()),
149
+ statistic=build_args_metric(metric, key, **metric_inputs),
150
+ paired=True,
151
+ vectorized=False,
152
+ confidence_level=confidence_level,
153
+ n_resamples=n_resamples,
154
+ random_state=random_state,
155
+ )
156
+ bootstrap_dict[key] = {
157
+ "confidence_interval": (bs.confidence_interval.low, bs.confidence_interval.high),
158
+ "standard_error": bs.standard_error,
159
+ }
160
+ return bootstrap_dict
161
+
162
+ @staticmethod
163
+ def _compute_time_perf(start_time: float, end_time: float, num_samples: int) -> Dict[str, Any]:
164
+ """
165
+ A utility function computing time performance metrics:
166
+ - `total_time_in_seconds` - pipeline inference runtime for the evaluation data in seconds,
167
+ - `samples_per_second` - pipeline throughput in the number of samples per second.
168
+ - `latency_in_seconds` - pipeline inference runtime for the evaluation data in seconds per sample,
169
+
170
+ """
171
+ latency = end_time - start_time
172
+ throughput = num_samples / latency
173
+ latency_sample = 1.0 / throughput
174
+
175
+ return {
176
+ "total_time_in_seconds": latency,
177
+ "samples_per_second": throughput,
178
+ "latency_in_seconds": latency_sample,
179
+ }
180
+
181
+ @staticmethod
182
+ def _infer_device() -> int:
183
+ """Helper function to check if GPU or CPU is available for inference."""
184
+ # try infer with torch first
185
+ try:
186
+ import torch
187
+
188
+ if torch.cuda.is_available():
189
+ device = 0 # first GPU
190
+ else:
191
+ device = -1 # CPU
192
+ except ImportError:
193
+ # if not available try TF
194
+ try:
195
+ import tensorflow as tf
196
+
197
+ if len(tf.config.list_physical_devices("GPU")) > 0:
198
+ device = 0 # first GPU
199
+ else:
200
+ device = -1 # CPU
201
+ except ImportError:
202
+ device = -1
203
+
204
+ if device == -1:
205
+ logger.info("No GPU found. The default device for pipeline inference is set to CPU.")
206
+ else:
207
+ logger.info("GPU found. The default device for pipeline inference is set to GPU (CUDA:0).")
208
+
209
+ return device
210
+
211
+ @abstractmethod
212
+ def predictions_processor(self, *args, **kwargs):
213
+ """
214
+ A core method of the `Evaluator` class, which processes the pipeline outputs for compatibility with the metric.
215
+ """
216
+ raise NotImplementedError()
217
+
218
+ def compute(
219
+ self,
220
+ model_or_pipeline: Union[
221
+ str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
222
+ ] = None,
223
+ data: Union[str, Dataset] = None,
224
+ subset: Optional[str] = None,
225
+ split: Optional[str] = None,
226
+ metric: Union[str, EvaluationModule] = None,
227
+ tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
228
+ feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None, # noqa: F821
229
+ strategy: Literal["simple", "bootstrap"] = "simple",
230
+ confidence_level: float = 0.95,
231
+ n_resamples: int = 9999,
232
+ device: int = None,
233
+ random_state: Optional[int] = None,
234
+ input_column: str = "text",
235
+ label_column: str = "label",
236
+ label_mapping: Optional[Dict[str, Number]] = None,
237
+ ) -> Dict[str, float]:
238
+
239
+ result = {}
240
+
241
+ self.check_for_mismatch_in_device_setup(device, model_or_pipeline)
242
+
243
+ # Prepare inputs
244
+ data = self.load_data(data=data, subset=subset, split=split)
245
+ metric_inputs, pipe_inputs = self.prepare_data(data=data, input_column=input_column, label_column=label_column)
246
+ pipe = self.prepare_pipeline(
247
+ model_or_pipeline=model_or_pipeline,
248
+ tokenizer=tokenizer,
249
+ feature_extractor=feature_extractor,
250
+ device=device,
251
+ )
252
+ metric = self.prepare_metric(metric)
253
+
254
+ # Compute predictions
255
+ predictions, perf_results = self.call_pipeline(pipe, pipe_inputs)
256
+ predictions = self.predictions_processor(predictions, label_mapping)
257
+
258
+ metric_inputs.update(predictions)
259
+
260
+ # Compute metrics from references and predictions
261
+ metric_results = self.compute_metric(
262
+ metric=metric,
263
+ metric_inputs=metric_inputs,
264
+ strategy=strategy,
265
+ confidence_level=confidence_level,
266
+ n_resamples=n_resamples,
267
+ random_state=random_state,
268
+ )
269
+
270
+ # TODO: To clarify why `wer` and `cer` return float
271
+ # even though metric.compute contract says that it
272
+ # returns Optional[dict].
273
+ if type(metric_results) == float:
274
+ metric_results = {metric.name: metric_results}
275
+
276
+ result.update(metric_results)
277
+ result.update(perf_results)
278
+
279
+ return result
280
+
281
+ @staticmethod
282
+ def check_for_mismatch_in_device_setup(device, model_or_pipeline):
283
+ if device is not None and device != -1 and isinstance(model_or_pipeline, Pipeline):
284
+ if model_or_pipeline.device.type == "cpu":
285
+ raise ValueError(
286
+ "The value of the `device` kwarg passed to `compute` suggests that this pipe should be run on an "
287
+ "accelerator, but the pipe was instantiated on CPU. Pass `device` to the pipeline during "
288
+ "initialization to use an accelerator, or pass `device=None` to `compute`. "
289
+ )
290
+ elif device != model_or_pipeline.device.index:
291
+ raise ValueError(
292
+ f"This pipeline was instantiated on device {model_or_pipeline.device.index} but device={device} was passed to `compute`."
293
+ )
294
+
295
+ def check_required_columns(self, data: Union[str, Dataset], columns_names: Dict[str, str]):
296
+ """
297
+ Ensure the columns required for the evaluation are present in the dataset.
298
+
299
+ Args:
300
+ data (`str` or [`Dataset`]):
301
+ Specifies the dataset we will run evaluation on.
302
+ columns_names (`Dict[str, str]`):
303
+ List of column names to check in the dataset. The keys are the arguments to the [`evaluate.EvaluationModule.compute`] method,
304
+ while the values are the column names to check.
305
+
306
+ Example:
307
+
308
+ ```py
309
+ >>> from datasets import load_dataset
310
+ >>> from evaluate import evaluator
311
+ >>> data = load_dataset("rotten_tomatoes", split="train")
312
+ >>> evaluator.check_required_columns(data, {"input_column": "text", "label_column": "label"})
313
+ ```
314
+ """
315
+ for input_name, column_name in columns_names.items():
316
+ if column_name not in data.column_names:
317
+ raise ValueError(
318
+ f"Invalid `{input_name}` {column_name} specified. The dataset contains the following columns: {data.column_names}."
319
+ )
320
+
321
+ @staticmethod
322
+ def get_dataset_split(data, subset=None, split=None):
323
+ """
324
+ Infers which split to use if `None` is given.
325
+
326
+ Args:
327
+ data (`str`):
328
+ Name of dataset.
329
+ subset (`str`):
330
+ Name of config for datasets with multiple configurations (e.g. 'glue/cola').
331
+ split (`str`, defaults to `None`):
332
+ Split to use.
333
+ Returns:
334
+ `split`: `str` containing which split to use
335
+
336
+ Example:
337
+
338
+ ```py
339
+ >>> from evaluate import evaluator
340
+ >>> evaluator("text-classification").get_dataset_split(data="rotten_tomatoes")
341
+ WARNING:evaluate.evaluator.base:Dataset split not defined! Automatically evaluating with split: TEST
342
+ 'test'
343
+ ```
344
+ """
345
+ if split is None:
346
+ split = choose_split(data, subset)
347
+ logger.warning(f"Dataset split not defined! Automatically evaluating with split: {split.upper()}")
348
+ return split
349
+
350
+ def load_data(self, data: Union[str, Dataset], subset: str = None, split: str = None):
351
+ """
352
+ Load dataset with given subset and split.
353
+ Args:
354
+ data ([`Dataset`] or `str`, defaults to `None`):
355
+ Specifies the dataset we will run evaluation on. If it is of
356
+ type `str`, we treat it as the dataset name, and load it. Otherwise we assume it represents a pre-loaded dataset.
357
+ subset (`str`, defaults to `None`):
358
+ Specifies dataset subset to be passed to `name` in `load_dataset`. To be
359
+ used with datasets with several configurations (e.g. glue/sst2).
360
+ split (`str`, defaults to `None`):
361
+ User-defined dataset split by name (e.g. train, validation, test). Supports slice-split (`test[:n]`).
362
+ If not defined and data is a `str` type, will automatically select the best one via `choose_split()`.
363
+ Returns:
364
+ data ([`Dataset`]): Loaded dataset which will be used for evaluation.
365
+
366
+ Example:
367
+
368
+ ```py
369
+ >>> from evaluate import evaluator
370
+ >>> evaluator("text-classification").load_data(data="rotten_tomatoes", split="train")
371
+ Dataset({
372
+ features: ['text', 'label'],
373
+ num_rows: 8530
374
+ })
375
+ ```
376
+ """
377
+ if isinstance(data, str):
378
+ split = self.get_dataset_split(data, subset, split)
379
+ data = load_dataset(data, name=subset, split=split)
380
+ return data
381
+ elif isinstance(data, Dataset):
382
+ if split is not None or subset is not None:
383
+ logger.warning("`data` is a preloaded Dataset! Ignoring `subset` and `split`.")
384
+ return data
385
+ else:
386
+ raise ValueError(
387
+ "Please specify a valid `data` object - either a `str` with a name or a `Dataset` object."
388
+ )
389
+
390
+ def prepare_data(self, data: Dataset, input_column: str, label_column: str, *args, **kwargs):
391
+ """
392
+ Prepare data.
393
+
394
+ Args:
395
+ data ([`Dataset`]):
396
+ Specifies the dataset we will run evaluation on.
397
+ input_column (`str`, defaults to `"text"`):
398
+ The name of the column containing the text feature in the dataset specified by `data`.
399
+ second_input_column (`str`, *optional*):
400
+ The name of the column containing the second text feature if there is one. Otherwise, set to `None`.
401
+ label_column (`str`, defaults to `"label"`):
402
+ The name of the column containing the labels in the dataset specified by `data`.
403
+ Returns:
404
+ `dict`: metric inputs.
405
+ `list`: pipeline inputs.
406
+
407
+ Example:
408
+
409
+ ```py
410
+ >>> from evaluate import evaluator
411
+ >>> from datasets import load_dataset
412
+
413
+ >>> ds = load_dataset("rotten_tomatoes", split="train")
414
+ >>> evaluator("text-classification").prepare_data(ds, input_column="text", second_input_column=None, label_column="label")
415
+ ```
416
+ """
417
+
418
+ self.check_required_columns(data, {"input_column": input_column, "label_column": label_column})
419
+
420
+ return {"references": data[label_column]}, DatasetColumn(data, input_column)
421
+
422
+ def prepare_pipeline(
423
+ self,
424
+ model_or_pipeline: Union[str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"], # noqa: F821
425
+ tokenizer: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821
426
+ feature_extractor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821
427
+ device: int = None,
428
+ ):
429
+ """
430
+ Prepare pipeline.
431
+
432
+ Args:
433
+ model_or_pipeline (`str` or [`~transformers.Pipeline`] or `Callable` or [`~transformers.PreTrainedModel`] or [`~transformers.TFPreTrainedModel`], defaults to `None`):
434
+ If the argument is not specified, we initialize the default pipeline for the task. If the argument is of the type `str` or
435
+ is a model instance, we use it to initialize a new [`~transformers.Pipeline`] with the given model. Otherwise we assume the
436
+ argument specifies a pre-initialized pipeline.
437
+ preprocessor ([`~transformers.PreTrainedTokenizerBase`] or [`~transformers.FeatureExtractionMixin`], *optional*, defaults to `None`):
438
+ Argument can be used to overwrite a default preprocessor if `model_or_pipeline` represents a model for
439
+ which we build a pipeline. If `model_or_pipeline` is `None` or a pre-initialized pipeline, we ignore
440
+ this argument.
441
+ Returns:
442
+ The initialized pipeline.
443
+
444
+ Example:
445
+
446
+ ```py
447
+ >>> from evaluate import evaluator
448
+ >>> evaluator("text-classification").prepare_pipeline(model_or_pipeline="distilbert-base-uncased")
449
+ ```
450
+ """
451
+
452
+ if device is None:
453
+ device = self._infer_device()
454
+
455
+ if (
456
+ isinstance(model_or_pipeline, str)
457
+ or isinstance(model_or_pipeline, transformers.PreTrainedModel)
458
+ or isinstance(model_or_pipeline, transformers.TFPreTrainedModel)
459
+ ):
460
+ pipe = pipeline(
461
+ self.task,
462
+ model=model_or_pipeline,
463
+ tokenizer=tokenizer,
464
+ feature_extractor=feature_extractor,
465
+ device=device,
466
+ )
467
+ else:
468
+ if model_or_pipeline is None:
469
+ pipe = pipeline(self.task, device=device)
470
+ else:
471
+ pipe = model_or_pipeline
472
+ if tokenizer is not None and feature_extractor is not None:
473
+ logger.warning("Ignoring the value of the preprocessor argument (`tokenizer` or `feature_extractor`).")
474
+ if (pipe.task != self.task) and not (self.task == "translation" and pipe.task.startswith("translation")):
475
+ raise ValueError(
476
+ f"Incompatible `model_or_pipeline`. Please specify `model_or_pipeline` compatible with the `{self.task}` task."
477
+ )
478
+ return pipe
479
+
480
+ def prepare_metric(self, metric: Union[str, EvaluationModule]):
481
+ """
482
+ Prepare metric.
483
+
484
+ Args:
485
+ metric (`str` or [`EvaluationModule`], defaults to `None`):
486
+ Specifies the metric we use in evaluator. If it is of type `str`, we treat it as the metric name, and
487
+ load it. Otherwise we assume it represents a pre-loaded metric.
488
+
489
+ Returns:
490
+ The loaded metric.
491
+
492
+ Example:
493
+
494
+ ```py
495
+ >>> from evaluate import evaluator
496
+ >>> evaluator("text-classification").prepare_metric("accuracy")
497
+ ```
498
+ """
499
+ # Prepare metric.
500
+ if metric is None:
501
+ if self.default_metric_name is None:
502
+ raise ValueError(
503
+ "`Evaluator` doesn't specify a default metric. Please specify a valid `metric` argument."
504
+ )
505
+ metric = load(self.default_metric_name)
506
+ elif isinstance(metric, str):
507
+ metric = load(metric)
508
+
509
+ return metric
510
+
511
+ def call_pipeline(self, pipe, *args, **kwargs):
512
+ start_time = perf_counter()
513
+ pipe_output = pipe(*args, **kwargs, **self.PIPELINE_KWARGS)
514
+ end_time = perf_counter()
515
+ return pipe_output, self._compute_time_perf(start_time, end_time, len(pipe_output))
516
+
517
+ def compute_metric(
518
+ self,
519
+ metric: EvaluationModule,
520
+ metric_inputs: Dict,
521
+ strategy: Literal["simple", "bootstrap"] = "simple",
522
+ confidence_level: float = 0.95,
523
+ n_resamples: int = 9999,
524
+ random_state: Optional[int] = None,
525
+ ):
526
+ """Compute and return metrics."""
527
+ result = metric.compute(**metric_inputs, **self.METRIC_KWARGS)
528
+
529
+ if strategy == "bootstrap":
530
+ metric_keys = result.keys()
531
+ bootstrap_dict = self._compute_confidence_interval(
532
+ metric,
533
+ metric_inputs,
534
+ metric_keys,
535
+ confidence_level,
536
+ n_resamples,
537
+ random_state,
538
+ )
539
+ for key in metric_keys:
540
+ bootstrap_dict[key]["score"] = result[key]
541
+
542
+ return bootstrap_dict
543
+
544
+ return result
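The base class works as a template: concrete evaluators override `PIPELINE_KWARGS` and `predictions_processor`, while `compute()` supplies data loading, pipeline invocation, timing and (optionally bootstrapped) metric computation. The toy subclass below sketches that contract under those assumptions; it is illustrative only and not part of the library:

    from evaluate.evaluator.base import Evaluator

    class ToyTextClassificationEvaluator(Evaluator):
        PIPELINE_KWARGS = {"truncation": True}  # forwarded to every pipeline call

        def __init__(self, task="text-classification", default_metric_name="accuracy"):
            super().__init__(task, default_metric_name=default_metric_name)

        def predictions_processor(self, predictions, label_mapping):
            # A transformers text-classification pipeline returns one
            # {"label", "score"} dict per input example.
            labels = [pred["label"] for pred in predictions]
            if label_mapping is not None:
                labels = [label_mapping[label] for label in labels]
            return {"predictions": labels}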
venv/lib/python3.10/site-packages/evaluate/evaluator/image_classification.py ADDED
@@ -0,0 +1,119 @@
+ # Copyright 2022 The HuggingFace Evaluate Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from numbers import Number
+ from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union
+
+ from datasets import Dataset
+ from typing_extensions import Literal
+
+ from ..module import EvaluationModule
+ from ..utils.file_utils import add_end_docstrings, add_start_docstrings
+ from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator
+
+
+ if TYPE_CHECKING:
+     from transformers import FeatureExtractionMixin, Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel
+
+
+ TASK_DOCUMENTATION = r"""
+     Examples:
+     ```python
+     >>> from evaluate import evaluator
+     >>> from datasets import load_dataset
+     >>> task_evaluator = evaluator("image-classification")
+     >>> data = load_dataset("beans", split="test[:40]")
+     >>> results = task_evaluator.compute(
+     >>>     model_or_pipeline="nateraw/vit-base-beans",
+     >>>     data=data,
+     >>>     label_column="labels",
+     >>>     metric="accuracy",
+     >>>     label_mapping={'angular_leaf_spot': 0, 'bean_rust': 1, 'healthy': 2},
+     >>>     strategy="bootstrap"
+     >>> )
+     ```
+ """
+
+
+ class ImageClassificationEvaluator(Evaluator):
+     """
+     Image classification evaluator.
+     This image classification evaluator can currently be loaded from [`evaluator`] using the default task name
+     `image-classification`.
+     Methods in this class assume a data format compatible with the [`ImageClassificationPipeline`].
+     """
+
+     PIPELINE_KWARGS = {}
+
+     def __init__(self, task="image-classification", default_metric_name=None):
+         super().__init__(task, default_metric_name=default_metric_name)
+
+     def predictions_processor(self, predictions, label_mapping):
+         pred_label = [max(pred, key=lambda x: x["score"])["label"] for pred in predictions]
+         pred_label = [label_mapping[pred] if label_mapping is not None else pred for pred in pred_label]
+
+         return {"predictions": pred_label}
+
+     @add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
+     @add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
+     def compute(
+         self,
+         model_or_pipeline: Union[
+             str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"  # noqa: F821
+         ] = None,
+         data: Union[str, Dataset] = None,
+         subset: Optional[str] = None,
+         split: Optional[str] = None,
+         metric: Union[str, EvaluationModule] = None,
+         tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None,  # noqa: F821
+         feature_extractor: Optional[Union[str, "FeatureExtractionMixin"]] = None,  # noqa: F821
+         strategy: Literal["simple", "bootstrap"] = "simple",
+         confidence_level: float = 0.95,
+         n_resamples: int = 9999,
+         device: int = None,
+         random_state: Optional[int] = None,
+         input_column: str = "image",
+         label_column: str = "label",
+         label_mapping: Optional[Dict[str, Number]] = None,
+     ) -> Tuple[Dict[str, float], Any]:
+
+         """
+         input_column (`str`, defaults to `"image"`):
+             The name of the column containing the images as PIL ImageFile in the dataset specified by `data`.
+         label_column (`str`, defaults to `"label"`):
+             The name of the column containing the labels in the dataset specified by `data`.
+         label_mapping (`Dict[str, Number]`, *optional*, defaults to `None`):
+             We want to map class labels defined by the model in the pipeline to values consistent with those
+             defined in the `label_column` of the `data` dataset.
+         """
+
+         result = super().compute(
+             model_or_pipeline=model_or_pipeline,
+             data=data,
+             subset=subset,
+             split=split,
+             metric=metric,
+             tokenizer=tokenizer,
+             feature_extractor=feature_extractor,
+             strategy=strategy,
+             confidence_level=confidence_level,
+             n_resamples=n_resamples,
+             device=device,
+             random_state=random_state,
+             input_column=input_column,
+             label_column=label_column,
+             label_mapping=label_mapping,
+         )
+
+         return result
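The docstring example above uses `strategy="bootstrap"`. Going by `_compute_confidence_interval` and `_compute_time_perf` in base.py, the returned dictionary then has roughly the shape sketched below (all numbers invented for illustration):

    bootstrap_result = {
        "accuracy": {
            "score": 0.95,                        # plain metric value
            "confidence_interval": (0.88, 0.99),  # from scipy.stats.bootstrap
            "standard_error": 0.03,
        },
        "total_time_in_seconds": 4.2,
        "samples_per_second": 9.5,
        "latency_in_seconds": 0.105,
    }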
venv/lib/python3.10/site-packages/evaluate/evaluator/question_answering.py ADDED
@@ -0,0 +1,239 @@
1
+ # Copyright 2022 The HuggingFace Evaluate Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
16
+
17
+ # Lint as: python3
18
+ from datasets import Dataset
19
+
20
+
21
+ try:
22
+ TRANSFORMERS_AVAILABLE = True
23
+ except ImportError:
24
+ TRANSFORMERS_AVAILABLE = False
25
+
26
+ from typing_extensions import Literal
27
+
28
+ from ..module import EvaluationModule
29
+ from ..utils.file_utils import add_end_docstrings, add_start_docstrings
30
+ from ..utils.logging import get_logger
31
+ from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator
32
+ from .utils import DatasetColumn
33
+
34
+
35
+ if TYPE_CHECKING:
36
+ from transformers import Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel
37
+
38
+
39
+ logger = get_logger(__name__)
40
+
41
+
42
+ TASK_DOCUMENTATION = r"""
43
+ Examples:
44
+ ```python
45
+ >>> from evaluate import evaluator
46
+ >>> from datasets import load_dataset
47
+ >>> task_evaluator = evaluator("question-answering")
48
+ >>> data = load_dataset("squad", split="validation[:2]")
49
+ >>> results = task_evaluator.compute(
50
+ >>> model_or_pipeline="sshleifer/tiny-distilbert-base-cased-distilled-squad",
51
+ >>> data=data,
52
+ >>> metric="squad",
53
+ >>> )
54
+ ```
55
+
56
+ <Tip>
57
+
58
+ Datasets where the answer may be missing in the context are supported, for example the SQuAD v2 dataset. In this case, it is safer to pass `squad_v2_format=True` to
59
+ the compute() call.
60
+
61
+ </Tip>
62
+
63
+ ```python
64
+ >>> from evaluate import evaluator
65
+ >>> from datasets import load_dataset
66
+ >>> task_evaluator = evaluator("question-answering")
67
+ >>> data = load_dataset("squad_v2", split="validation[:2]")
68
+ >>> results = task_evaluator.compute(
69
+ >>> model_or_pipeline="mrm8488/bert-tiny-finetuned-squadv2",
70
+ >>> data=data,
71
+ >>> metric="squad_v2",
72
+ >>> squad_v2_format=True,
73
+ >>> )
74
+ ```
75
+ """
76
+
77
+
78
+ class QuestionAnsweringEvaluator(Evaluator):
79
+ """
80
+ Question answering evaluator. This evaluator handles
81
+ [**extractive** question answering](https://huggingface.co/docs/transformers/task_summary#extractive-question-answering),
82
+ where the answer to the question is extracted from a context.
83
+
84
+ This question answering evaluator can currently be loaded from [`evaluator`] using the default task name
85
+ `question-answering`.
86
+
87
+ Methods in this class assume a data format compatible with the
88
+ [`~transformers.QuestionAnsweringPipeline`].
89
+ """
90
+
91
+ PIPELINE_KWARGS = {}
92
+
93
+ def __init__(self, task="question-answering", default_metric_name=None):
94
+ super().__init__(task, default_metric_name=default_metric_name)
95
+
96
+ def prepare_data(
97
+ self, data: Dataset, question_column: str, context_column: str, id_column: str, label_column: str
98
+ ):
99
+ """Prepare data."""
100
+ if data is None:
101
+ raise ValueError(
102
+ "Please specify a valid `data` object - either a `str` with a name or a `Dataset` object."
103
+ )
104
+ self.check_required_columns(
105
+ data,
106
+ {
107
+ "question_column": question_column,
108
+ "context_column": context_column,
109
+ "id_column": id_column,
110
+ "label_column": label_column,
111
+ },
112
+ )
113
+
114
+ metric_inputs = dict()
115
+ metric_inputs["references"] = [
116
+ {"id": element[id_column], "answers": element[label_column]} for element in data
117
+ ]
118
+
119
+ return metric_inputs, {
120
+ "question": DatasetColumn(data, question_column),
121
+ "context": DatasetColumn(data, context_column),
122
+ }
123
+
124
+ def is_squad_v2_format(self, data: Dataset, label_column: str = "answers"):
125
+ """
126
+ Check if the provided dataset follows the squad v2 data schema, namely that it may contain samples where the answer is not in the context.
127
+ In this case, the answer text list should be `[]`.
128
+ """
129
+ original_num_rows = data.num_rows
130
+ nonempty_num_rows = data.filter(
131
+ lambda x: len(x[label_column]["text"]) > 0, load_from_cache_file=False
132
+ ).num_rows
133
+ if original_num_rows > nonempty_num_rows:
134
+ return True
135
+ else:
136
+ return False
137
+
138
+ def predictions_processor(self, predictions: List, squad_v2_format: bool, ids: List):
139
+ result = []
140
+ for i in range(len(predictions)):
141
+ pred = {"prediction_text": predictions[i]["answer"], "id": ids[i]}
142
+ if squad_v2_format:
143
+ pred["no_answer_probability"] = predictions[i]["score"]
144
+ result.append(pred)
145
+ return {"predictions": result}
146
+
147
+ @add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
148
+ @add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
149
+ def compute(
150
+ self,
151
+ model_or_pipeline: Union[
152
+ str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
153
+ ] = None,
154
+ data: Union[str, Dataset] = None,
155
+ subset: Optional[str] = None,
156
+ split: Optional[str] = None,
157
+ metric: Union[str, EvaluationModule] = None,
158
+ tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
159
+ strategy: Literal["simple", "bootstrap"] = "simple",
160
+ confidence_level: float = 0.95,
161
+ n_resamples: int = 9999,
162
+ device: int = None,
163
+ random_state: Optional[int] = None,
164
+ question_column: str = "question",
165
+ context_column: str = "context",
166
+ id_column: str = "id",
167
+ label_column: str = "answers",
168
+ squad_v2_format: Optional[bool] = None,
169
+ ) -> Tuple[Dict[str, float], Any]:
170
+ """
171
+ question_column (`str`, defaults to `"question"`):
172
+ The name of the column containing the question in the dataset specified by `data`.
173
+ context_column (`str`, defaults to `"context"`):
174
+ The name of the column containing the context in the dataset specified by `data`.
175
+ id_column (`str`, defaults to `"id"`):
176
+ The name of the column containing the identification field of the question and answer pair in the
177
+ dataset specified by `data`.
178
+ label_column (`str`, defaults to `"answers"`):
179
+ The name of the column containing the answers in the dataset specified by `data`.
180
+ squad_v2_format (`bool`, *optional*, defaults to `None`):
181
+ Whether the dataset follows the format of squad_v2 dataset. This is the case when the provided dataset
182
+ has questions where the answer is not in the context, more specifically when the answers are given as
183
+ `{"text": [], "answer_start": []}` in the answer column. If all questions have at least one answer, this parameter
184
+ should be set to `False`. If this parameter is not provided, the format will be automatically inferred.
185
+ """
186
+ result = {}
187
+ self.check_for_mismatch_in_device_setup(device, model_or_pipeline)
188
+
189
+ data = self.load_data(data=data, subset=subset, split=split)
190
+ metric_inputs, pipe_inputs = self.prepare_data(
191
+ data=data,
192
+ question_column=question_column,
193
+ context_column=context_column,
194
+ id_column=id_column,
195
+ label_column=label_column,
196
+ )
197
+
198
+ if squad_v2_format is None:
199
+ squad_v2_format = self.is_squad_v2_format(data=data, label_column=label_column)
200
+ logger.warning(
201
+ f"`squad_v2_format` parameter not provided to QuestionAnsweringEvaluator.compute(). Automatically inferred `squad_v2_format` as {squad_v2_format}."
202
+ )
203
+ pipe = self.prepare_pipeline(model_or_pipeline=model_or_pipeline, tokenizer=tokenizer, device=device)
204
+
205
+ metric = self.prepare_metric(metric)
206
+
207
+ if squad_v2_format and metric.name == "squad":
208
+ logger.warning(
209
+ "The dataset has SQuAD v2 format but you are using the SQuAD metric. Consider passing the 'squad_v2' metric."
210
+ )
211
+ if not squad_v2_format and metric.name == "squad_v2":
212
+ logger.warning(
213
+ "The dataset has SQuAD v1 format but you are using the SQuAD v2 metric. Consider passing the 'squad' metric."
214
+ )
215
+
216
+ if squad_v2_format:
217
+ self.PIPELINE_KWARGS["handle_impossible_answer"] = True
218
+ else:
219
+ self.PIPELINE_KWARGS["handle_impossible_answer"] = False
220
+
221
+ # Compute predictions
222
+ predictions, perf_results = self.call_pipeline(pipe, **pipe_inputs)
223
+ predictions = self.predictions_processor(predictions, squad_v2_format=squad_v2_format, ids=data[id_column])
224
+ metric_inputs.update(predictions)
225
+
226
+ # Compute metrics from references and predictions
227
+ metric_results = self.compute_metric(
228
+ metric=metric,
229
+ metric_inputs=metric_inputs,
230
+ strategy=strategy,
231
+ confidence_level=confidence_level,
232
+ n_resamples=n_resamples,
233
+ random_state=random_state,
234
+ )
235
+
236
+ result.update(metric_results)
237
+ result.update(perf_results)
238
+
239
+ return result
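
For context, a minimal usage sketch of this evaluator; the model checkpoint, dataset slice and metric below are illustrative choices, not part of the diff.

```python
from datasets import load_dataset
from evaluate import evaluator

task_evaluator = evaluator("question-answering")
data = load_dataset("squad", split="validation[:100]")  # small slice for a quick check

results = task_evaluator.compute(
    model_or_pipeline="distilbert-base-cased-distilled-squad",
    data=data,
    metric="squad",
    squad_v2_format=False,  # SQuAD v1: every question has an answer in the context
)
print(results)  # metric scores plus latency/throughput performance fields
```
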
venv/lib/python3.10/site-packages/evaluate/evaluator/text2text_generation.py ADDED
@@ -0,0 +1,267 @@
1
+ # Copyright 2022 The HuggingFace Evaluate Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Union
16
+
17
+ from datasets import Dataset
18
+ from typing_extensions import Literal
19
+
20
+ from ..module import EvaluationModule
21
+ from ..utils.file_utils import add_start_docstrings
22
+ from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator
23
+
24
+
25
+ if TYPE_CHECKING:
26
+ from transformers import Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel
27
+
28
+
29
+ TASK_DOCUMENTATION_KWARGS = r"""
30
+ input_column (`str`, defaults to `"text"`):
31
+ the name of the column containing the input text in the dataset specified by `data`.
32
+ label_column (`str`, defaults to `"label"`):
33
+ the name of the column containing the labels in the dataset specified by `data`.
34
+ generation_kwargs (`Dict`, *optional*, defaults to `None`):
35
+ The generation kwargs are passed to the pipeline and set the text generation strategy.
36
+ """
37
+
38
+ TEXT2TEXT_TASK_DOCSTRING_EXAMPLE = r"""
39
+ Examples:
40
+ ```python
41
+ >>> from evaluate import evaluator
42
+ >>> from datasets import load_dataset
43
+ >>> task_evaluator = evaluator("text2text-generation")
44
+ >>> data = load_dataset("cnn_dailymail", "3.0.0", split="validation[:40]")
45
+ >>> results = task_evaluator.compute(
46
+ >>> model_or_pipeline="facebook/bart-large-cnn",
47
+ >>> data=data,
48
+ >>> input_column="article",
49
+ >>> label_column="highlights",
50
+ >>> metric="rouge",
51
+ >>> )
52
+ ```
53
+ """
54
+
55
+ SUMMARIZATION_TASK_DOCSTRING_EXAMPLE = r"""
56
+ Examples:
57
+ ```python
58
+ >>> from evaluate import evaluator
59
+ >>> from datasets import load_dataset
60
+ >>> task_evaluator = evaluator("summarization")
61
+ >>> data = load_dataset("cnn_dailymail", "3.0.0", split="validation[:40]")
62
+ >>> results = task_evaluator.compute(
63
+ >>> model_or_pipeline="facebook/bart-large-cnn",
64
+ >>> data=data,
65
+ >>> input_column="article",
66
+ >>> label_column="highlights",
67
+ >>> )
68
+ ```
69
+ """
70
+
71
+
72
+ TRANSLATION_TASK_DOCSTRING_EXAMPLE = r"""
73
+ Examples:
74
+ ```python
75
+ >>> from evaluate import evaluator
76
+ >>> from datasets import load_dataset
77
+ >>> task_evaluator = evaluator("translation")
78
+ >>> data = load_dataset("wmt19", "fr-de", split="validation[:40]")
79
+ >>> data = data.map(lambda x: {"text": x["translation"]["de"], "label": x["translation"]["fr"]})
80
+ >>> results = task_evaluator.compute(
81
+ >>> model_or_pipeline="Helsinki-NLP/opus-mt-de-fr",
82
+ >>> data=data,
83
+ >>> )
84
+ ```
85
+ """
86
+
87
+
88
+ class Text2TextGenerationEvaluator(Evaluator):
89
+ """
90
+ Text2Text generation evaluator.
91
+ This Text2Text generation evaluator can currently be loaded from [`evaluator`] using the default task name
92
+ `text2text-generation`.
93
+ Methods in this class assume a data format compatible with the [`~transformers.Text2TextGenerationPipeline`].
94
+ """
95
+
96
+ PREDICTION_PREFIX = "generated"
97
+ PIPELINE_KWARGS = {"truncation": True}
98
+
99
+ def __init__(self, task="text2text-generation", default_metric_name=None):
100
+ super().__init__(task, default_metric_name=default_metric_name)
101
+
102
+ def predictions_processor(self, predictions, label_mapping):
103
+ return {"predictions": [pred[f"{self.PREDICTION_PREFIX}_text"] for pred in predictions]}
104
+
105
+ @add_start_docstrings(
106
+ EVALUTOR_COMPUTE_START_DOCSTRING,
107
+ TASK_DOCUMENTATION_KWARGS,
108
+ EVALUATOR_COMPUTE_RETURN_DOCSTRING,
109
+ TEXT2TEXT_TASK_DOCSTRING_EXAMPLE,
110
+ )
111
+ def compute(
112
+ self,
113
+ model_or_pipeline: Union[
114
+ str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
115
+ ] = None,
116
+ data: Union[str, Dataset] = None,
117
+ subset: Optional[str] = None,
118
+ split: Optional[str] = None,
119
+ metric: Union[str, EvaluationModule] = None,
120
+ tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
121
+ strategy: Literal["simple", "bootstrap"] = "simple",
122
+ confidence_level: float = 0.95,
123
+ n_resamples: int = 9999,
124
+ device: int = None,
125
+ random_state: Optional[int] = None,
126
+ input_column: str = "text",
127
+ label_column: str = "label",
128
+ generation_kwargs: dict = None,
129
+ ) -> Tuple[Dict[str, float], Any]:
130
+ if generation_kwargs is not None:
131
+ self.PIPELINE_KWARGS.update(generation_kwargs)
132
+
133
+ result = super().compute(
134
+ model_or_pipeline=model_or_pipeline,
135
+ data=data,
136
+ subset=subset,
137
+ split=split,
138
+ metric=metric,
139
+ tokenizer=tokenizer,
140
+ strategy=strategy,
141
+ confidence_level=confidence_level,
142
+ n_resamples=n_resamples,
143
+ device=device,
144
+ random_state=random_state,
145
+ input_column=input_column,
146
+ label_column=label_column,
147
+ )
148
+
149
+ return result
150
+
151
+
152
+ class SummarizationEvaluator(Text2TextGenerationEvaluator):
153
+ """
154
+ Text summarization evaluator.
155
+ This text summarization evaluator can currently be loaded from [`evaluator`] using the default task name
156
+ `summarization`.
157
+ Methods in this class assume a data format compatible with the [`~transformers.SummarizationPipeline`].
158
+ """
159
+
160
+ PREDICTION_PREFIX = "summary"
161
+ PIPELINE_KWARGS = {"truncation": True}
162
+
163
+ def __init__(self, task="summarization", default_metric_name=None):
164
+ super().__init__(task, default_metric_name=default_metric_name)
165
+
166
+ @add_start_docstrings(
167
+ EVALUTOR_COMPUTE_START_DOCSTRING,
168
+ TASK_DOCUMENTATION_KWARGS,
169
+ EVALUATOR_COMPUTE_RETURN_DOCSTRING,
170
+ SUMMARIZATION_TASK_DOCSTRING_EXAMPLE,
171
+ )
172
+ def compute(
173
+ self,
174
+ model_or_pipeline: Union[
175
+ str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
176
+ ] = None,
177
+ data: Union[str, Dataset] = None,
178
+ subset: Optional[str] = None,
179
+ split: Optional[str] = None,
180
+ metric: Union[str, EvaluationModule] = None,
181
+ tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
182
+ strategy: Literal["simple", "bootstrap"] = "simple",
183
+ confidence_level: float = 0.95,
184
+ n_resamples: int = 9999,
185
+ device: int = None,
186
+ random_state: Optional[int] = None,
187
+ input_column: str = "text",
188
+ label_column: str = "label",
189
+ generation_kwargs: dict = None,
190
+ ) -> Tuple[Dict[str, float], Any]:
191
+ result = super().compute(
192
+ model_or_pipeline=model_or_pipeline,
193
+ data=data,
194
+ subset=subset,
195
+ split=split,
196
+ metric=metric,
197
+ tokenizer=tokenizer,
198
+ strategy=strategy,
199
+ confidence_level=confidence_level,
200
+ n_resamples=n_resamples,
201
+ device=device,
202
+ random_state=random_state,
203
+ input_column=input_column,
204
+ label_column=label_column,
205
+ generation_kwargs=generation_kwargs,
206
+ )
207
+
208
+ return result
209
+
210
+
211
+ class TranslationEvaluator(Text2TextGenerationEvaluator):
212
+ """
213
+ Translation evaluator.
214
+ This translation generation evaluator can currently be loaded from [`evaluator`] using the default task name
215
+ `translation`.
216
+ Methods in this class assume a data format compatible with the [`~transformers.TranslationPipeline`].
217
+ """
218
+
219
+ PREDICTION_PREFIX = "translation"
220
+ PIPELINE_KWARGS = {"truncation": True}
221
+
222
+ def __init__(self, task="translation", default_metric_name=None):
223
+ super().__init__(task, default_metric_name=default_metric_name)
224
+
225
+ @add_start_docstrings(
226
+ EVALUTOR_COMPUTE_START_DOCSTRING,
227
+ TASK_DOCUMENTATION_KWARGS,
228
+ EVALUATOR_COMPUTE_RETURN_DOCSTRING,
229
+ TRANSLATION_TASK_DOCSTRING_EXAMPLE,
230
+ )
231
+ def compute(
232
+ self,
233
+ model_or_pipeline: Union[
234
+ str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
235
+ ] = None,
236
+ data: Union[str, Dataset] = None,
237
+ subset: Optional[str] = None,
238
+ split: Optional[str] = None,
239
+ metric: Union[str, EvaluationModule] = None,
240
+ tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
241
+ strategy: Literal["simple", "bootstrap"] = "simple",
242
+ confidence_level: float = 0.95,
243
+ n_resamples: int = 9999,
244
+ device: int = None,
245
+ random_state: Optional[int] = None,
246
+ input_column: str = "text",
247
+ label_column: str = "label",
248
+ generation_kwargs: dict = None,
249
+ ) -> Tuple[Dict[str, float], Any]:
250
+ result = super().compute(
251
+ model_or_pipeline=model_or_pipeline,
252
+ data=data,
253
+ subset=subset,
254
+ split=split,
255
+ metric=metric,
256
+ tokenizer=tokenizer,
257
+ strategy=strategy,
258
+ confidence_level=confidence_level,
259
+ n_resamples=n_resamples,
260
+ device=device,
261
+ random_state=random_state,
262
+ input_column=input_column,
263
+ label_column=label_column,
264
+ generation_kwargs=generation_kwargs,
265
+ )
266
+
267
+ return result
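
A small sketch of what `PREDICTION_PREFIX` is for (the dictionaries below are invented stand-ins for pipeline outputs): each subclass reads the `<prefix>_text` key that its corresponding transformers pipeline emits, e.g. `generated_text`, `summary_text` or `translation_text`.

```python
# Invented pipeline outputs in the summarization shape.
predictions = [{"summary_text": "A short summary."}, {"summary_text": "Another summary."}]

prediction_prefix = "summary"  # what SummarizationEvaluator sets as PREDICTION_PREFIX
processed = {"predictions": [p[f"{prediction_prefix}_text"] for p in predictions]}
print(processed["predictions"])  # ['A short summary.', 'Another summary.']
```
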
venv/lib/python3.10/site-packages/evaluate/evaluator/text_generation.py ADDED
@@ -0,0 +1,69 @@
1
+ # Copyright 2022 The HuggingFace Evaluate Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import Dict, Tuple
16
+
17
+ from datasets import Dataset
18
+
19
+ from .base import Evaluator
20
+ from .utils import DatasetColumn
21
+
22
+
23
+ TASK_DOCUMENTATION_KWARGS = r"""
24
+ input_column (`str`, defaults to `"text"`):
25
+ the name of the column containing the input text in the dataset specified by `data`.
26
+ generation_kwargs (`Dict`, *optional*, defaults to `None`):
27
+ The generation kwargs are passed to the pipeline and set the text generation strategy.
28
+ """
29
+
30
+
31
+ class TextGenerationEvaluator(Evaluator):
32
+ """
33
+ Text generation evaluator.
34
+ This Text generation evaluator can currently be loaded from [`evaluator`] using the default task name
35
+ `text-generation`.
36
+ Methods in this class assume a data format compatible with the [`~transformers.TextGenerationPipeline`].
37
+ """
38
+
39
+ def predictions_processor(self, predictions, *args, **kwargs):
40
+ """
41
+ Args:
42
+ predictions: A list of lists of dicts
43
+
44
+ Returns:
45
+ `dict`: All the generated texts are flattened and stored under the "data" key.
46
+ """
47
+ return {"data": [pred[f"{self.predictions_prefix}_text"] for pred_list in predictions for pred in pred_list]}
48
+
49
+ def __init__(self, task="text-generation", default_metric_name=None, predictions_prefix: str = "generated"):
50
+ super().__init__(task=task, default_metric_name=default_metric_name)
51
+ self.predictions_prefix = predictions_prefix
52
+
53
+ def prepare_data(self, data: Dataset, input_column: str, *args, **kwargs) -> Tuple[Dict, DatasetColumn]:
54
+ """
55
+ Prepare data.
56
+
57
+ Args:
58
+ data ([`Dataset`]):
59
+ Specifies the dataset we will run evaluation on.
60
+ input_column (`str`, defaults to `"text"`):
61
+ The name of the column containing the text feature in the dataset specified by `data`.
62
+ Returns:
63
+ `dict`: metric inputs.
64
+ `list`: pipeline inputs.
65
+ """
66
+
67
+ self.check_required_columns(data, {"input_column": input_column})
68
+
69
+ return {}, DatasetColumn(data, input_column)
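
To illustrate the flattening performed by `predictions_processor` above (the outputs below are invented; a text-generation pipeline returns one list of candidate dicts per input row):

```python
# Two input rows; the first produced two candidate generations, the second one.
predictions = [
    [{"generated_text": "first continuation"}, {"generated_text": "second continuation"}],
    [{"generated_text": "another row"}],
]
predictions_prefix = "generated"
flattened = {
    "data": [p[f"{predictions_prefix}_text"] for row in predictions for p in row]
}
print(flattened["data"])
# ['first continuation', 'second continuation', 'another row']
```
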
venv/lib/python3.10/site-packages/evaluate/evaluator/token_classification.py ADDED
@@ -0,0 +1,278 @@
1
+ # Copyright 2022 The HuggingFace Evaluate Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
16
+
17
+ from datasets import ClassLabel, Dataset, Sequence
18
+ from typing_extensions import Literal
19
+
20
+ from ..module import EvaluationModule
21
+ from ..utils.file_utils import add_end_docstrings, add_start_docstrings
22
+ from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator
23
+ from .utils import DatasetColumn
24
+
25
+
26
+ if TYPE_CHECKING:
27
+ from transformers import Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel
28
+
29
+
30
+ TASK_DOCUMENTATION = r"""
31
+ The dataset input and label columns are expected to be formatted as a list of words and a list of labels respectively, following the [conll2003 dataset](https://huggingface.co/datasets/conll2003). Datasets whose inputs are single strings and whose labels are lists of offsets are not supported.
32
+
33
+ Examples:
34
+ ```python
35
+ >>> from evaluate import evaluator
36
+ >>> from datasets import load_dataset
37
+ >>> task_evaluator = evaluator("token-classification")
38
+ >>> data = load_dataset("conll2003", split="validation[:2]")
39
+ >>> results = task_evaluator.compute(
40
+ >>> model_or_pipeline="elastic/distilbert-base-uncased-finetuned-conll03-english",
41
+ >>> data=data,
42
+ >>> metric="seqeval",
43
+ >>> )
44
+ ```
45
+
46
+ <Tip>
47
+
48
+ For example, the following dataset format is accepted by the evaluator:
49
+
50
+ ```python
51
+ dataset = Dataset.from_dict(
52
+ mapping={
53
+ "tokens": [["New", "York", "is", "a", "city", "and", "Felix", "a", "person", "."]],
54
+ "ner_tags": [[1, 2, 0, 0, 0, 0, 3, 0, 0, 0]],
55
+ },
56
+ features=Features({
57
+ "tokens": Sequence(feature=Value(dtype="string")),
58
+ "ner_tags": Sequence(feature=ClassLabel(names=["O", "B-LOC", "I-LOC", "B-PER", "I-PER"])),
59
+ }),
60
+ )
61
+ ```
62
+
63
+ </Tip>
64
+
65
+ <Tip warning={true}>
66
+
67
+ For example, the following dataset format is **not** accepted by the evaluator:
68
+
69
+ ```python
70
+ dataset = Dataset.from_dict(
71
+ mapping={
72
+ "tokens": [["New York is a city and Felix a person."]],
73
+ "starts": [[0, 23]],
74
+ "ends": [[7, 27]],
75
+ "ner_tags": [["LOC", "PER"]],
76
+ },
77
+ features=Features({
78
+ "tokens": Value(dtype="string"),
79
+ "starts": Sequence(feature=Value(dtype="int32")),
80
+ "ends": Sequence(feature=Value(dtype="int32")),
81
+ "ner_tags": Sequence(feature=Value(dtype="string")),
82
+ }),
83
+ )
84
+ ```
85
+
86
+ </Tip>
87
+ """
88
+
89
+
90
+ class TokenClassificationEvaluator(Evaluator):
91
+ """
92
+ Token classification evaluator.
93
+
94
+ This token classification evaluator can currently be loaded from [`evaluator`] using the default task name
95
+ `token-classification`.
96
+
97
+ Methods in this class assume a data format compatible with the [`~transformers.TokenClassificationPipeline`].
98
+ """
99
+
100
+ PIPELINE_KWARGS = {"ignore_labels": []}
101
+
102
+ def __init__(self, task="token-classification", default_metric_name=None):
103
+ super().__init__(task, default_metric_name=default_metric_name)
104
+
105
+ def predictions_processor(self, predictions: List[List[Dict]], words: List[List[str]], join_by: str):
106
+ """
107
+ Transform the pipeline predictions into a list of predicted labels of the same length as the true labels.
108
+
109
+ Args:
110
+ predictions (`List[List[Dict]]`):
111
+ List of pipeline predictions, where each token has been labeled.
112
+ words (`List[List[str]]`):
113
+ Original input data to the pipeline, used to build predicted labels of the same length.
114
+ join_by (`str`):
115
+ String to use to join two words. In English, it will typically be " ".
116
+
117
+ Returns:
118
+ `dict`: a dictionary holding the predictions
119
+ """
120
+ preds = []
121
+
122
+ # iterate over the data rows
123
+ for i, prediction in enumerate(predictions):
124
+ pred_processed = []
125
+
126
+ # get a list of tuples giving the indexes of the start and end character of each word
127
+ words_offsets = self.words_to_offsets(words[i], join_by)
128
+
129
+ token_index = 0
130
+ for word_offset in words_offsets:
131
+ # for each word, we may keep only the predicted label for the first token, discard the others
132
+ while prediction[token_index]["start"] < word_offset[0]:
133
+ token_index += 1
134
+
135
+ if prediction[token_index]["start"] > word_offset[0]: # bad indexing
136
+ pred_processed.append("O")
137
+ elif prediction[token_index]["start"] == word_offset[0]:
138
+ pred_processed.append(prediction[token_index]["entity"])
139
+
140
+ preds.append(pred_processed)
141
+
142
+ return {"predictions": preds}
143
+
144
+ def words_to_offsets(self, words: List[str], join_by: str):
145
+ """
146
+ Convert a list of words to a list of offsets, where words are joined by `join_by`.
147
+
148
+ Args:
149
+ words (`List[str]`):
150
+ List of words to get offsets from.
151
+ join_by (`str`):
152
+ String to insert between words.
153
+
154
+ Returns:
155
+ `List[Tuple[int, int]]`: List of the characters (start index, end index) for each of the words.
156
+ """
157
+ offsets = []
158
+
159
+ start = 0
160
+ for word in words:
161
+ end = start + len(word) - 1
162
+ offsets.append((start, end))
163
+ start = end + len(join_by) + 1
164
+
165
+ return offsets
166
+
167
+ def prepare_data(self, data: Union[str, Dataset], input_column: str, label_column: str, join_by: str):
168
+ super().prepare_data(data, input_column, label_column)
169
+
170
+ if not isinstance(data.features[input_column], Sequence) or not isinstance(
171
+ data.features[label_column], Sequence
172
+ ):
173
+ raise ValueError(
174
+ "TokenClassificationEvaluator expects the input and label columns to be provided as lists."
175
+ )
176
+
177
+ # If the labels are of type ClassLabel, they are already integers and we have the map stored somewhere.
178
+ # Otherwise, we have to get the list of labels manually.
179
+ labels_are_int = isinstance(data.features[label_column].feature, ClassLabel)
180
+ if labels_are_int:
181
+ label_list = data.features[label_column].feature.names # list of string labels
182
+ id_to_label = {i: label for i, label in enumerate(label_list)}
183
+ references = [[id_to_label[label_id] for label_id in label_ids] for label_ids in data[label_column]]
184
+ elif data.features[label_column].feature.dtype.startswith("int"):
185
+ raise NotImplementedError(
186
+ "References provided as integers, but the reference column is not a Sequence of ClassLabels."
187
+ )
188
+ else:
189
+ # In the event the labels are not a `Sequence[ClassLabel]`, we have already labels as strings
190
+ # An example is labels as ["PER", "PER", "O", "LOC", "O", "LOC", "O"], e.g. in polyglot_ner dataset
191
+ references = data[label_column]
192
+
193
+ metric_inputs = {"references": references}
194
+ data = data.map(lambda x: {input_column: join_by.join(x[input_column])})
195
+ pipeline_inputs = DatasetColumn(data, input_column)
196
+
197
+ return metric_inputs, pipeline_inputs
198
+
199
+ def prepare_pipeline(
200
+ self,
201
+ model_or_pipeline: Union[str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"], # noqa: F821
202
+ tokenizer: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821
203
+ feature_extractor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821
204
+ device: int = None,
205
+ ):
206
+ pipe = super().prepare_pipeline(model_or_pipeline, tokenizer, feature_extractor, device)
207
+
208
+ # check that the pipeline outputs start-character indices in its predictions
209
+ dummy_output = pipe(["2003 New York Gregory"], **self.PIPELINE_KWARGS)
210
+ if dummy_output[0][0]["start"] is None:
211
+ raise ValueError(
212
+ "TokenClassificationEvaluator supports only pipelines giving 'start' index as a pipeline output (got None). "
213
+ "Transformers pipelines with a slow tokenizer will raise this error."
214
+ )
215
+
216
+ return pipe
217
+
218
+ @add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
219
+ @add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
220
+ def compute(
221
+ self,
222
+ model_or_pipeline: Union[
223
+ str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
224
+ ] = None,
225
+ data: Union[str, Dataset] = None,
226
+ subset: Optional[str] = None,
227
+ split: str = None,
228
+ metric: Union[str, EvaluationModule] = None,
229
+ tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
230
+ strategy: Literal["simple", "bootstrap"] = "simple",
231
+ confidence_level: float = 0.95,
232
+ n_resamples: int = 9999,
233
+ device: Optional[int] = None,
234
+ random_state: Optional[int] = None,
235
+ input_column: str = "tokens",
236
+ label_column: str = "ner_tags",
237
+ join_by: Optional[str] = " ",
238
+ ) -> Tuple[Dict[str, float], Any]:
239
+ """
240
+ input_column (`str`, defaults to `"tokens"`):
241
+ The name of the column containing the tokens feature in the dataset specified by `data`.
242
+ label_column (`str`, defaults to `"ner_tags"`):
243
+ The name of the column containing the labels in the dataset specified by `data`.
244
+ join_by (`str`, *optional*, defaults to `" "`):
245
+ This evaluator supports datasets whose input column is a list of words. This parameter specifies how to join
246
+ words to generate a string input. This is especially useful for languages that do not separate words by a space.
247
+ """
248
+ result = {}
249
+
250
+ self.check_for_mismatch_in_device_setup(device, model_or_pipeline)
251
+
252
+ # Prepare inputs
253
+ data = self.load_data(data=data, subset=subset, split=split)
254
+ metric_inputs, pipe_inputs = self.prepare_data(
255
+ data=data, input_column=input_column, label_column=label_column, join_by=join_by
256
+ )
257
+ pipe = self.prepare_pipeline(model_or_pipeline=model_or_pipeline, tokenizer=tokenizer, device=device)
258
+ metric = self.prepare_metric(metric)
259
+
260
+ # Compute predictions
261
+ predictions, perf_results = self.call_pipeline(pipe, pipe_inputs)
262
+ predictions = self.predictions_processor(predictions, data[input_column], join_by)
263
+ metric_inputs.update(predictions)
264
+
265
+ # Compute metrics from references and predictions
266
+ metric_results = self.compute_metric(
267
+ metric=metric,
268
+ metric_inputs=metric_inputs,
269
+ strategy=strategy,
270
+ confidence_level=confidence_level,
271
+ n_resamples=n_resamples,
272
+ random_state=random_state,
273
+ )
274
+
275
+ result.update(metric_results)
276
+ result.update(perf_results)
277
+
278
+ return result
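
A worked example of the `words_to_offsets` logic above, re-implemented standalone so the arithmetic is easy to check:

```python
# Same offset arithmetic as words_to_offsets, for words joined by a single space.
words = ["New", "York", "is", "a", "city"]
join_by = " "

offsets, start = [], 0
for word in words:
    end = start + len(word) - 1          # inclusive end index of the word
    offsets.append((start, end))
    start = end + len(join_by) + 1       # jump over the separator

print(offsets)                 # [(0, 2), (4, 7), (9, 10), (12, 12), (14, 17)]
print(" ".join(words)[0:3])    # "New" -> characters 0..2, matching the first offset
```
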
venv/lib/python3.10/site-packages/evaluate/evaluator/utils.py ADDED
@@ -0,0 +1,84 @@
1
+ from datasets import Dataset, get_dataset_split_names
2
+
3
+
4
+ class DatasetColumn(list):
5
+ """Helper class to avoid loading a dataset column into memory when accessing it."""
6
+
7
+ def __init__(self, dataset: Dataset, key: str):
8
+ self.dataset = dataset
9
+ self.key = key
10
+
11
+ def __len__(self):
12
+ return len(self.dataset)
13
+
14
+ def __getitem__(self, i):
15
+ return self.dataset[i][self.key]
16
+
17
+ def __iter__(self):
18
+ return (self.dataset[i][self.key] for i in range(len(self)))
19
+
20
+
21
+ def choose_split(data, subset=None):
22
+ available_splits = get_dataset_split_names(data, subset)
23
+ preferred_split_order = [
24
+ "test",
25
+ "testing",
26
+ "eval",
27
+ "evaluation",
28
+ "validation",
29
+ "val",
30
+ "valid",
31
+ "dev",
32
+ "train",
33
+ "training",
34
+ ]
35
+ for split in preferred_split_order:
36
+ if split in available_splits:
37
+ return split
38
+ raise ValueError("No dataset split defined! Pass an explicit value to the `split` kwarg.")
39
+
40
+
41
+ class DatasetColumnPair(list):
42
+ """Helper class to avoid loading two dataset columns into memory when accessing them."""
43
+
44
+ def __init__(
45
+ self,
46
+ dataset: Dataset,
47
+ first_col: str,
48
+ second_col: str,
49
+ first_key: str,
50
+ second_key: str,
51
+ ):
52
+ """
53
+ Args:
54
+ dataset (Dataset): dataset to build an iterator on
55
+ first_col (str): first column name to use in the dataset
56
+ second_col (str): second column name to use in the dataset
57
+ first_key (str): key name used for the first column in the returned dictionary
58
+ second_key (str): key name used for the second column in the returned dictionary
59
+ """
60
+ self.dataset = dataset
61
+
62
+ self.first_col = first_col
63
+ self.second_col = second_col
64
+
65
+ self.first_key = first_key
66
+ self.second_key = second_key
67
+
68
+ def __len__(self):
69
+ return len(self.dataset)
70
+
71
+ def __getitem__(self, i):
72
+ return {
73
+ self.first_key: self.dataset[i][self.first_col],
74
+ self.second_key: self.dataset[i][self.second_col] if self.second_col else None,
75
+ }
76
+
77
+ def __iter__(self):
78
+ return (
79
+ {
80
+ self.first_key: self.dataset[i][self.first_col],
81
+ self.second_key: self.dataset[i][self.second_col] if self.second_col else None,
82
+ }
83
+ for i in range(len(self))
84
+ )
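
A quick sketch of how `DatasetColumn` behaves; the tiny in-memory dataset below is illustrative.

```python
from datasets import Dataset
from evaluate.evaluator.utils import DatasetColumn

data = Dataset.from_dict({"question": ["Who?", "Where?"], "context": ["Bob did.", "In Paris."]})
questions = DatasetColumn(data, "question")  # lazy, list-like view over one column

print(len(questions))   # 2
print(questions[1])     # Where?
print(list(questions))  # ['Who?', 'Where?']
```
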
venv/lib/python3.10/site-packages/torch/_C.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (37.9 kB).
venv/lib/python3.10/site-packages/torch/_VF.py ADDED
@@ -0,0 +1,30 @@
1
+ """
2
+ This makes the functions in torch._C._VariableFunctions available as
3
+ torch._VF.<funcname>
4
+ without mypy being able to find them.
5
+
6
+ A subset of those functions are mapped to ATen functions in
7
+ torch/jit/_builtins.py
8
+
9
+ See https://github.com/pytorch/pytorch/issues/21478 for the reason for
10
+ introducing torch._VF
11
+
12
+ """
13
+ import sys
14
+ import types
15
+
16
+ import torch
17
+
18
+
19
+ class VFModule(types.ModuleType):
20
+ vf: types.ModuleType
21
+
22
+ def __init__(self, name):
23
+ super().__init__(name)
24
+ self.vf = torch._C._VariableFunctions
25
+
26
+ def __getattr__(self, attr):
27
+ return getattr(self.vf, attr)
28
+
29
+
30
+ sys.modules[__name__] = VFModule(__name__)
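
A minimal check of the redirection this module sets up: `_VF.mean` resolves to the same ATen binding as the public `torch.mean`. This mirrors the `from torch import _VF` pattern used inside `torch.nn.functional`.

```python
import torch
from torch import _VF  # the self-replacing module defined above

x = torch.arange(6.0).reshape(2, 3)
print(torch.mean(x))  # public API
print(_VF.mean(x))    # same underlying torch._C._VariableFunctions.mean
```
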
venv/lib/python3.10/site-packages/torch/_VF.pyi ADDED
The diff for this file is too large to render.
venv/lib/python3.10/site-packages/torch/__config__.py ADDED
@@ -0,0 +1,22 @@
1
+ import torch
2
+
3
+
4
+ def show():
5
+ """
6
+ Return a human-readable string with descriptions of the
7
+ configuration of PyTorch.
8
+ """
9
+ return torch._C._show_config()
10
+
11
+
12
+ # TODO: In principle, we could provide more structured version/config
13
+ # information here. For now only CXX_FLAGS is exposed, as Timer
14
+ # uses them.
15
+ def _cxx_flags():
16
+ """Returns the CXX_FLAGS used when building PyTorch."""
17
+ return torch._C._cxx_flags()
18
+
19
+
20
+ def parallel_info():
21
+ r"""Returns detailed string with parallelization settings"""
22
+ return torch._C._parallel_info()
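
These helpers can be called directly; the exact output depends on how the installed PyTorch wheel was built.

```python
import torch

print(torch.__config__.show())           # compiler, CUDA/cuDNN, BLAS and build-flag summary
print(torch.__config__.parallel_info())  # OpenMP / MKL / interop thread settings
```
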
venv/lib/python3.10/site-packages/torch/__future__.py ADDED
@@ -0,0 +1,75 @@
1
+ _overwrite_module_params_on_conversion: bool = False
2
+ _swap_module_params_on_conversion: bool = False
3
+
4
+
5
+ def set_overwrite_module_params_on_conversion(value: bool) -> None:
6
+ """
7
+ Sets whether to assign new tensors to the parameters instead of changing the
8
+ existing parameters in-place when converting an ``nn.Module``.
9
+
10
+ When enabled, the following methods will assign new parameters to the module:
11
+
12
+ #. ``module.{device}()`` (e.g. :meth:`nn.Module.cuda()`) for moving a module between devices
13
+ #. ``module.{dtype}()`` (e.g. :meth:`nn.Module.float()`) for converting a module to a different dtype
14
+ #. :meth:`nn.Module.to`
15
+ #. :meth:`nn.Module.to_empty`
16
+
17
+ Args:
18
+ value (bool): Whether to assign new tensors or not.
19
+
20
+ """
21
+ global _overwrite_module_params_on_conversion
22
+ _overwrite_module_params_on_conversion = value
23
+
24
+
25
+ def get_overwrite_module_params_on_conversion() -> bool:
26
+ """
27
+ Returns whether to assign new tensors to the parameters instead of changing the
28
+ existing parameters in-place when converting an :class:`torch.nn.Module`. Defaults to ``False``.
29
+
30
+ See :func:`~torch.__future__.set_overwrite_module_params_on_conversion` for more information.
31
+ """
32
+ return _overwrite_module_params_on_conversion
33
+
34
+
35
+ def set_swap_module_params_on_conversion(value: bool) -> None:
36
+ """
37
+ Sets whether to use :func:`~torch.utils.swap_tensors` instead of setting ``.data`` to
38
+ change the existing parameters in-place when converting an ``nn.Module`` and instead
39
+ of ``param.copy_(state_dict[key])`` when loading a state dict into an ``nn.Module``.
40
+
41
+ .. note::
42
+ This function takes precedence over :func:`~torch.__future__.get_overwrite_module_params_on_conversion`
43
+
44
+ When enabled, the following methods will swap the existing parameters in-place:
45
+
46
+ #. ``module.{device}()`` (e.g. :meth:`nn.Module.cuda()`) for moving a module between devices
47
+ #. ``module.{dtype}()`` (e.g. :meth:`nn.Module.float()`) for converting a module to a different dtype
48
+ #. :meth:`nn.Module.to`
49
+ #. :meth:`nn.Module.to_empty`
50
+ #. :meth:`nn.Module.load_state_dict`
51
+
52
+ The semantics for :meth:`~nn.Module.load_state_dict` when this is set are as follows:
53
+
54
+ #. For each parameter/buffer, its corresponding ``state_dict['key']`` is transformed via
55
+ :meth:`~torch.Tensor.module_load` (i.e. ``res = param.module_load(state_dict['key'])``)
56
+ #. If necessary, ``res`` will be wrapped in an :class:`~nn.Parameter`
57
+ #. The parameter/buffer in the module will be swapped via :func:`~torch.utils.swap_tensors`
58
+ with ``res``
59
+
60
+ Args:
61
+ value (bool): Whether to use :func:`~torch.utils.swap_tensors` or not.
62
+
63
+ """
64
+ global _swap_module_params_on_conversion
65
+ _swap_module_params_on_conversion = value
66
+
67
+
68
+ def get_swap_module_params_on_conversion() -> bool:
69
+ """
70
+ Returns whether to use :func:`~torch.utils.swap_tensors` instead of setting .data to
71
+ change the existing parameters in-place when converting an ``nn.Module``. Defaults to ``False``.
72
+
73
+ See :func:`~torch.__future__.set_swap_module_params_on_conversion` for more information.
74
+ """
75
+ return _swap_module_params_on_conversion
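
A short sketch of toggling the swap-based conversion described above; it assumes a PyTorch build recent enough to ship these flags.

```python
import torch
import torch.nn as nn
from torch.__future__ import (
    get_swap_module_params_on_conversion,
    set_swap_module_params_on_conversion,
)

set_swap_module_params_on_conversion(True)
print(get_swap_module_params_on_conversion())  # True

m = nn.Linear(2, 2)
m.to(torch.float64)        # parameters are swapped in place via torch.utils.swap_tensors
print(m.weight.dtype)      # torch.float64
```
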
venv/lib/python3.10/site-packages/torch/__init__.py ADDED
@@ -0,0 +1,2038 @@
1
+
2
+ r"""
3
+ The torch package contains data structures for multi-dimensional
4
+ tensors and defines mathematical operations over these tensors.
5
+ Additionally, it provides many utilities for efficient serialization of
6
+ Tensors and arbitrary types, and other useful utilities.
7
+
8
+ It has a CUDA counterpart, that enables you to run your tensor computations
9
+ on an NVIDIA GPU with compute capability >= 3.0.
10
+ """
11
+
12
+ import math
13
+ import os
14
+ import sys
15
+ import platform
16
+ import textwrap
17
+ import ctypes
18
+ import inspect
19
+ import threading
20
+
21
+ # multipy/deploy is setting this import before importing torch, this is the most
22
+ # reliable way we have to detect if we're running within deploy.
23
+ # https://github.com/pytorch/multipy/blob/d60f34ad38c371e441fe7ffdb77a3c3dda5a5d19/multipy/runtime/interpreter/interpreter_impl.cpp#L134-L137
24
+ def _running_with_deploy():
25
+ return sys.modules.get("torch._meta_registrations", None) is object
26
+
27
+ from ._utils import _import_dotted_name, classproperty
28
+ from ._utils import _functionalize_sync as _sync
29
+ from ._utils_internal import get_file_path, prepare_multiprocessing_environment, \
30
+ USE_RTLD_GLOBAL_WITH_LIBTORCH, USE_GLOBAL_DEPS
31
+
32
+ # TODO(torch_deploy) figure out how to freeze version.py in fbcode build
33
+ if _running_with_deploy():
34
+ __version__ = "torch-deploy-1.8"
35
+ else:
36
+ from .torch_version import __version__ as __version__
37
+
38
+ from typing import Any, Callable, Dict, Optional, Set, Tuple, Type, TYPE_CHECKING, Union, List
39
+ import builtins
40
+
41
+ __all__ = [
42
+ 'typename', 'is_tensor', 'is_storage',
43
+ 'set_default_tensor_type', 'set_default_device', 'get_default_device',
44
+ 'set_rng_state', 'get_rng_state', 'manual_seed', 'initial_seed', 'seed',
45
+ 'save', 'load', 'set_printoptions', 'chunk', 'split', 'stack', 'matmul',
46
+ 'no_grad', 'enable_grad', 'rand', 'randn', 'inference_mode',
47
+ 'DoubleStorage', 'FloatStorage', 'LongStorage', 'IntStorage',
48
+ 'ShortStorage', 'CharStorage', 'ByteStorage', 'BoolStorage',
49
+ 'TypedStorage', 'UntypedStorage',
50
+ 'DoubleTensor', 'FloatTensor', 'LongTensor', 'IntTensor',
51
+ 'ShortTensor', 'CharTensor', 'ByteTensor', 'BoolTensor', 'Tensor',
52
+ 'lobpcg', 'use_deterministic_algorithms',
53
+ 'are_deterministic_algorithms_enabled',
54
+ 'is_deterministic_algorithms_warn_only_enabled',
55
+ 'set_deterministic_debug_mode', 'get_deterministic_debug_mode',
56
+ 'set_float32_matmul_precision', 'get_float32_matmul_precision',
57
+ 'set_warn_always', 'is_warn_always_enabled', 'SymInt', 'SymFloat',
58
+ 'SymBool', 'sym_not', 'unravel_index',
59
+ 'sym_int', 'sym_float', 'sym_max', 'sym_min', 'sym_ite', 'compile', 'vmap',
60
+ 'export', 'autocast', 'cond', 'GradScaler',
61
+ ]
62
+
63
+ ################################################################################
64
+ # Load the extension module
65
+ ################################################################################
66
+
67
+ if sys.platform == 'win32':
68
+ pfiles_path = os.getenv('ProgramFiles', 'C:\\Program Files')
69
+ py_dll_path = os.path.join(sys.exec_prefix, 'Library', 'bin')
70
+ th_dll_path = os.path.join(os.path.dirname(__file__), 'lib')
71
+
72
+ # When users create a virtualenv that inherits the base environment,
73
+ # we will need to add the corresponding library directory into
74
+ # DLL search directories. Otherwise, it will rely on `PATH` which
75
+ # is dependent on user settings.
76
+ if sys.exec_prefix != sys.base_exec_prefix:
77
+ base_py_dll_path = os.path.join(sys.base_exec_prefix, 'Library', 'bin')
78
+ else:
79
+ base_py_dll_path = ''
80
+
81
+ dll_paths = list(filter(os.path.exists, [th_dll_path, py_dll_path, base_py_dll_path]))
82
+
83
+ if all(not os.path.exists(os.path.join(p, 'nvToolsExt64_1.dll')) for p in dll_paths):
84
+ nvtoolsext_dll_path = os.path.join(
85
+ os.getenv('NVTOOLSEXT_PATH', os.path.join(pfiles_path, 'NVIDIA Corporation', 'NvToolsExt')), 'bin', 'x64')
86
+ else:
87
+ nvtoolsext_dll_path = ''
88
+
89
+ from .version import cuda as cuda_version
90
+ import glob
91
+ if cuda_version and all(not glob.glob(os.path.join(p, 'cudart64*.dll')) for p in dll_paths):
92
+ cuda_version_1 = cuda_version.replace('.', '_')
93
+ cuda_path_var = 'CUDA_PATH_V' + cuda_version_1
94
+ default_path = os.path.join(pfiles_path, 'NVIDIA GPU Computing Toolkit', 'CUDA', 'v' + cuda_version)
95
+ cuda_path = os.path.join(os.getenv(cuda_path_var, default_path), 'bin')
96
+ else:
97
+ cuda_path = ''
98
+
99
+ dll_paths.extend(filter(os.path.exists, [nvtoolsext_dll_path, cuda_path]))
100
+
101
+ kernel32 = ctypes.WinDLL('kernel32.dll', use_last_error=True)
102
+ with_load_library_flags = hasattr(kernel32, 'AddDllDirectory')
103
+ prev_error_mode = kernel32.SetErrorMode(0x0001)
104
+
105
+ kernel32.LoadLibraryW.restype = ctypes.c_void_p
106
+ if with_load_library_flags:
107
+ kernel32.LoadLibraryExW.restype = ctypes.c_void_p
108
+
109
+ for dll_path in dll_paths:
110
+ os.add_dll_directory(dll_path)
111
+
112
+ try:
113
+ ctypes.CDLL('vcruntime140.dll')
114
+ ctypes.CDLL('msvcp140.dll')
115
+ ctypes.CDLL('vcruntime140_1.dll')
116
+ except OSError:
117
+ print('''Microsoft Visual C++ Redistributable is not installed, this may lead to the DLL load failure.
118
+ It can be downloaded at https://aka.ms/vs/16/release/vc_redist.x64.exe''')
119
+
120
+ dlls = glob.glob(os.path.join(th_dll_path, '*.dll'))
121
+ path_patched = False
122
+ for dll in dlls:
123
+ is_loaded = False
124
+ if with_load_library_flags:
125
+ res = kernel32.LoadLibraryExW(dll, None, 0x00001100)
126
+ last_error = ctypes.get_last_error()
127
+ if res is None and last_error != 126:
128
+ err = ctypes.WinError(last_error)
129
+ err.strerror += f' Error loading "{dll}" or one of its dependencies.'
130
+ raise err
131
+ elif res is not None:
132
+ is_loaded = True
133
+ if not is_loaded:
134
+ if not path_patched:
135
+ os.environ['PATH'] = ';'.join(dll_paths + [os.environ['PATH']])
136
+ path_patched = True
137
+ res = kernel32.LoadLibraryW(dll)
138
+ if res is None:
139
+ err = ctypes.WinError(ctypes.get_last_error())
140
+ err.strerror += f' Error loading "{dll}" or one of its dependencies.'
141
+ raise err
142
+
143
+ kernel32.SetErrorMode(prev_error_mode)
144
+
145
+
146
+ def _preload_cuda_deps(lib_folder, lib_name):
147
+ """Preloads cuda deps if they could not be found otherwise."""
148
+ # Should only be called on Linux if default path resolution has failed
149
+ assert platform.system() == 'Linux', 'Should only be called on Linux'
150
+ import glob
151
+ lib_path = None
152
+ for path in sys.path:
153
+ nvidia_path = os.path.join(path, 'nvidia')
154
+ if not os.path.exists(nvidia_path):
155
+ continue
156
+ candidate_lib_paths = glob.glob(os.path.join(nvidia_path, lib_folder, 'lib', lib_name))
157
+ if candidate_lib_paths and not lib_path:
158
+ lib_path = candidate_lib_paths[0]
159
+ if lib_path:
160
+ break
161
+ if not lib_path:
162
+ raise ValueError(f"{lib_name} not found in the system path {sys.path}")
163
+ ctypes.CDLL(lib_path)
164
+
165
+
166
+ # See Note [Global dependencies]
167
+ def _load_global_deps() -> None:
168
+ if _running_with_deploy() or platform.system() == 'Windows':
169
+ return
170
+
171
+ lib_name = 'libtorch_global_deps' + ('.dylib' if platform.system() == 'Darwin' else '.so')
172
+ here = os.path.abspath(__file__)
173
+ lib_path = os.path.join(os.path.dirname(here), 'lib', lib_name)
174
+
175
+ try:
176
+ ctypes.CDLL(lib_path, mode=ctypes.RTLD_GLOBAL)
177
+ except OSError as err:
178
+ # Can only happen for wheel with cuda libs as PYPI deps
179
+ # As PyTorch is not purelib, but nvidia-*-cu12 is
180
+ cuda_libs: Dict[str, str] = {
181
+ 'cublas': 'libcublas.so.*[0-9]',
182
+ 'cudnn': 'libcudnn.so.*[0-9]',
183
+ 'cuda_nvrtc': 'libnvrtc.so.*[0-9]',
184
+ 'cuda_runtime': 'libcudart.so.*[0-9]',
185
+ 'cuda_cupti': 'libcupti.so.*[0-9]',
186
+ 'cufft': 'libcufft.so.*[0-9]',
187
+ 'curand': 'libcurand.so.*[0-9]',
188
+ 'cusolver': 'libcusolver.so.*[0-9]',
189
+ 'cusparse': 'libcusparse.so.*[0-9]',
190
+ 'nccl': 'libnccl.so.*[0-9]',
191
+ 'nvtx': 'libnvToolsExt.so.*[0-9]',
192
+ }
193
+ is_cuda_lib_err = [lib for lib in cuda_libs.values() if lib.split('.')[0] in err.args[0]]
194
+ if not is_cuda_lib_err:
195
+ raise err
196
+ for lib_folder, lib_name in cuda_libs.items():
197
+ _preload_cuda_deps(lib_folder, lib_name)
198
+ ctypes.CDLL(lib_path, mode=ctypes.RTLD_GLOBAL)
199
+
200
+
201
+ if (USE_RTLD_GLOBAL_WITH_LIBTORCH or os.getenv('TORCH_USE_RTLD_GLOBAL')) and \
202
+ (_running_with_deploy() or platform.system() != 'Windows'):
203
+ # Do it the hard way. You might want to load libtorch with RTLD_GLOBAL in a
204
+ # few circumstances:
205
+ #
206
+ # 1. You're in a build environment (e.g., fbcode) where
207
+ # libtorch_global_deps is not available, but you still need
208
+ # to get mkl to link in with RTLD_GLOBAL or it will just
209
+ # not work.
210
+ #
211
+ # 2. You're trying to run PyTorch under UBSAN and you need
212
+ # to ensure that only one copy of libtorch is loaded, so
213
+ # vptr checks work properly
214
+ #
215
+ # If you're using this setting, you must verify that all the libraries
216
+ # you load consistently use the same libstdc++, or you may have
217
+ # mysterious segfaults.
218
+ #
219
+ old_flags = sys.getdlopenflags()
220
+ sys.setdlopenflags(os.RTLD_GLOBAL | os.RTLD_LAZY)
221
+ from torch._C import * # noqa: F403
222
+ sys.setdlopenflags(old_flags)
223
+ del old_flags
224
+
225
+ else:
226
+ # Easy way. You want this most of the time, because it will prevent
227
+ # C++ symbols from libtorch clobbering C++ symbols from other
228
+ # libraries, leading to mysterious segfaults.
229
+ #
230
+ # If building in an environment where libtorch_global_deps isn't available
231
+ # like parts of fbsource, but where RTLD_GLOBAL causes segfaults, you will
232
+ # want USE_RTLD_GLOBAL_WITH_LIBTORCH = False and USE_GLOBAL_DEPS = False
233
+ #
234
+ # See Note [Global dependencies]
235
+ if USE_GLOBAL_DEPS:
236
+ _load_global_deps()
237
+ from torch._C import * # noqa: F403
238
+
239
+ # Appease the type checker; ordinarily this binding is inserted by the
240
+ # torch._C module initialization code in C
241
+ if TYPE_CHECKING:
242
+ from . import _C as _C
243
+
244
+ class SymInt:
245
+ """
246
+ Like an int (including magic methods), but redirects all operations on the
247
+ wrapped node. This is used in particular to symbolically record operations
248
+ in the symbolic shape workflow.
249
+ """
250
+
251
+ def __init__(self, node):
252
+ # This field MUST be named node; C++ binding code assumes that this
253
+ # class has a field named node that stores SymNode
254
+ self.node = node
255
+
256
+ def __bool__(self):
257
+ return builtins.bool(self != 0)
258
+
259
+ def __int__(self):
260
+ return self.node.int_()
261
+
262
+ def __index__(self):
263
+ return self.node.int_()
264
+
265
+ # Magic methods installed by torch.fx.experimental.sym_node
266
+
267
+ def __eq__(self, other: object) -> builtins.bool:
268
+ raise AssertionError("type stub not overridden")
269
+
270
+ def __lt__(self, other) -> builtins.bool:
271
+ raise AssertionError("type stub not overridden")
272
+
273
+ def __gt__(self, other) -> builtins.bool:
274
+ raise AssertionError("type stub not overridden")
275
+
276
+ def __le__(self, other) -> builtins.bool:
277
+ raise AssertionError("type stub not overridden")
278
+
279
+ def __ge__(self, other) -> builtins.bool:
280
+ raise AssertionError("type stub not overridden")
281
+
282
+ def __add__(self, other) -> "SymInt":
283
+ raise AssertionError("type stub not overridden")
284
+
285
+ def __mul__(self, other) -> "SymInt":
286
+ raise AssertionError("type stub not overridden")
287
+
288
+ def __sym_max__(self, other):
289
+ raise AssertionError("type stub not overridden")
290
+
291
+ def __sym_min__(self, other):
292
+ raise AssertionError("type stub not overridden")
293
+
294
+ def __sym_float__(self):
295
+ raise AssertionError("type stub not overridden")
296
+
297
+ def __neg__(self):
298
+ raise AssertionError("type stub not overridden")
299
+
300
+ def __repr__(self):
301
+ return str(self.node)
302
+
303
+ def __hash__(self) -> builtins.int:
304
+ if self.node.is_nested_int():
305
+ return hash(self.node.nested_int())
306
+ else:
307
+ # We could support constant SymInts as well, but not doing it for now
308
+ raise TypeError("unhashable type: non-nested SymInt")
309
+
310
+ class SymFloat:
311
+ """
312
+ Like a float (including magic methods), but redirects all operations on the
313
+ wrapped node. This is used in particular to symbolically record operations
314
+ in the symbolic shape workflow.
315
+ """
316
+
317
+ def __init__(self, node):
318
+ # This field MUST be named node; C++ binding code assumes that this
319
+ # class has a field named node that stores SymNode
320
+ self.node = node
321
+
322
+ def __bool__(self):
323
+ return self.node.bool_()
324
+
325
+ # Magic methods installed by torch.fx.experimental.sym_node
326
+
327
+ def __eq__(self, other: object) -> builtins.bool:
328
+ raise AssertionError("type stub not overridden")
329
+
330
+ def __lt__(self, other) -> builtins.bool:
331
+ raise AssertionError("type stub not overridden")
332
+
333
+ def __gt__(self, other) -> builtins.bool:
334
+ raise AssertionError("type stub not overridden")
335
+
336
+ def __le__(self, other) -> builtins.bool:
337
+ raise AssertionError("type stub not overridden")
338
+
339
+ def __ge__(self, other) -> builtins.bool:
340
+ raise AssertionError("type stub not overridden")
341
+
342
+ def __sym_max__(self, other):
343
+ raise AssertionError("type stub not overridden")
344
+
345
+ def __sym_min__(self, other):
346
+ raise AssertionError("type stub not overridden")
347
+
348
+ def __sym_int__(self):
349
+ raise AssertionError("type stub not overridden")
350
+
351
+ def is_integer(self):
352
+ """Return True if the float is an integer."""
353
+ raise AssertionError("type stub not overridden")
354
+
355
+ def __repr__(self):
356
+ return self.node.str()
357
+
358
+ class SymBool:
359
+ """
360
+ Like a bool (including magic methods), but redirects all operations on the
361
+ wrapped node. This is used in particular to symbolically record operations
362
+ in the symbolic shape workflow.
363
+
364
+ Unlike regular bools, regular boolean operators will force extra guards instead
365
+ of evaluating symbolically. Use the bitwise operators instead to handle this.
366
+ """
367
+
368
+ def __init__(self, node):
369
+ # This field MUST be named node; C++ binding code assumes that this
370
+ # class has a field named node that stores SymNode
371
+ self.node = node
372
+
373
+ def __bool__(self):
374
+ return self.node.bool_()
375
+
376
+ def __int__(self):
377
+ return builtins.int(self.node.bool_())
378
+
379
+ # Magic methods installed by torch.fx.experimental.sym_node
380
+ def __and__(self, other) -> "SymBool":
381
+ raise AssertionError("type stub not overridden")
382
+
383
+ def __or__(self, other) -> "SymBool":
384
+ raise AssertionError("type stub not overridden")
385
+
386
+ # We very carefully define __sym_not__, and not a number of other
387
+ # plausible alternatives:
388
+ #
389
+ # - We do not override __not__ because this is not a real magic
390
+ # method; you cannot override the meaning of the not builtin in
391
+ # Python. We use the name 'sym_not' to clarify that in user code you
392
+ # cannot use the builtin not or operator.not_ or operator.__not__ and
393
+ # hit this magic method; you must use our custom sym_not operator.
394
+ #
395
+ # - We do not override the __invert__ method because SymBool is
396
+ # meant to be usable in situations where bool is expected. However,
397
+ # bitwise negation ~a does the wrong thing with booleans (because
398
+ # bool is a subclass of int, so ~1 = -2 which is not falseish.)
399
+ # This would be a giant footgun, so we get around it by defining
400
+ # our own operator. Note that bitwise and/or do the right thing,
401
+ # so we reuse the conventional operators there for readability.
402
+ #
403
+ def __sym_not__(self) -> "SymBool":
404
+ raise AssertionError("type stub not overridden")
405
+
406
+ def __sym_ite__(self, then_val, else_val):
407
+ raise AssertionError("type stub not overridden")
408
+
409
+ def __eq__(self, other) -> builtins.bool:
410
+ raise AssertionError("type stub not overridden")
411
+
412
+ def __repr__(self):
413
+ return str(self.node)
414
+
415
+ def __hash__(self):
416
+ if self.node.is_constant():
417
+ return hash(self.node.bool_())
418
+ else:
419
+ raise TypeError("unhashable type: SymBool")
420
+
421
+ def sym_not(a):
422
+ r""" SymInt-aware utility for logical negation.
423
+
424
+ Args:
425
+ a (SymBool or bool): Object to negate
426
+ """
427
+ import sympy
428
+ from .overrides import has_torch_function_unary, handle_torch_function
429
+
430
+ if has_torch_function_unary(a):
431
+ return handle_torch_function(sym_not, (a,), a)
432
+ if hasattr(a, '__sym_not__'):
433
+ return a.__sym_not__()
434
+ if isinstance(a, sympy.Basic):
435
+ return ~a # type: ignore[operator]
436
+ return not a
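+ # Rough usage sketch for sym_not (illustrative only): plain Python inputs
+ # simply fall through to `not`, while SymBool inputs stay symbolic via
+ # __sym_not__:
+ #   >>> sym_not(False)
+ #   True
+ #   >>> sym_not(True)
+ #   False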
437
+
438
+ def sym_float(a):
439
+ r""" SymInt-aware utility for float casting.
440
+
441
+ Args:
442
+ a (SymInt, SymFloat, or object): Object to cast
443
+ """
444
+ from .overrides import has_torch_function_unary, handle_torch_function
445
+
446
+ if has_torch_function_unary(a):
447
+ return handle_torch_function(sym_float, (a,), a)
448
+ if isinstance(a, SymFloat):
449
+ return a
450
+ elif hasattr(a, '__sym_float__'):
451
+ return a.__sym_float__()
452
+ return py_float(a) # type: ignore[operator]
453
+
454
+
455
+ def sym_int(a):
456
+ r""" SymInt-aware utility for int casting.
457
+
458
+ Args:
459
+ a (SymInt, SymFloat, or object): Object to cast
460
+ """
461
+ from .overrides import has_torch_function_unary, handle_torch_function
462
+
463
+ if has_torch_function_unary(a):
464
+ return handle_torch_function(sym_int, (a,), a)
465
+ if isinstance(a, SymInt):
466
+ return a
467
+ elif isinstance(a, SymFloat):
468
+ return math.floor(a) if a >= 0 else math.ceil(a) # type: ignore[arg-type, call-overload]
469
+ return py_int(a) # type: ignore[operator]
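+ # Rough usage sketch for sym_int (illustrative only): plain Python values
+ # fall through to int(), and symbolic floats are rounded toward zero:
+ #   >>> sym_int(5.8)
+ #   5
+ #   >>> sym_int(-5.8)
+ #   -5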
470
+
471
+ def sym_max(a, b):
472
+ """ SymInt-aware utility for max()."""
473
+ from .overrides import has_torch_function, handle_torch_function
474
+
475
+ if has_torch_function((a, b)):
476
+ return handle_torch_function(sym_max, (a, b), a, b)
477
+ if isinstance(a, (SymInt, SymFloat)):
478
+ return a.__sym_max__(b)
479
+ elif isinstance(b, (SymInt, SymFloat)):
480
+ # NB: If you actually care about preserving output type exactly
481
+ # if you do something like max(0, 0.0), it is NOT sound to treat
482
+ # min/max as commutative
483
+ return b.__sym_max__(a)
484
+ return builtins.max(a, b) # type: ignore[operator]
485
+
486
+ def sym_min(a, b):
487
+ """ SymInt-aware utility for max()."""
488
+ from .overrides import has_torch_function, handle_torch_function
489
+
490
+ if has_torch_function((a, b)):
491
+ return handle_torch_function(sym_min, (a, b), a, b)
492
+ if isinstance(a, (SymInt, SymFloat)):
493
+ return a.__sym_min__(b)
494
+ elif isinstance(b, (SymInt, SymFloat)):
495
+ return b.__sym_min__(a)
496
+ return builtins.min(a, b) # type: ignore[operator]
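+ # Rough usage sketch for sym_max/sym_min (illustrative only): with no
+ # symbolic operand they defer to builtins.max/min; with a SymInt/SymFloat
+ # operand they dispatch to __sym_max__/__sym_min__ instead:
+ #   >>> sym_max(2, 3)
+ #   3
+ #   >>> sym_min(2.0, 3)
+ #   2.0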
497
+
498
+ # Drop in replacement for math.sqrt, math.sin, math.cos etc
499
+ current_module = sys.modules[__name__]
500
+
501
+ def _get_sym_math_fn(name):
502
+ def fn(a):
503
+ from .overrides import has_torch_function_unary, handle_torch_function
504
+
505
+ if has_torch_function_unary(a):
506
+ return handle_torch_function(fn, (a,), a)
507
+ if hasattr(a, f"__sym_{name}__"):
508
+ return getattr(a, f"__sym_{name}__")()
509
+ return getattr(math, name)(a)
510
+
511
+ return fn
512
+
513
+ for name in ("sqrt", "cos", "cosh", "sin", "sinh", "tan", "tanh", "asin", "acos", "atan"):
514
+ sym_name = f"_sym_{name}"
515
+ fn = _get_sym_math_fn(name)
516
+ fn.__qualname__ = fn.__name__ = sym_name
517
+ setattr(current_module, sym_name, fn)
518
+
519
+ # Adding temporary shortcut
520
+ sym_sqrt = current_module._sym_sqrt
521
+ __all__.append("sym_sqrt")
522
+
523
+ del fn, name, sym_name, current_module # type: ignore[possibly-undefined]
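+ # Rough usage sketch (illustrative only): the generated helpers fall back to
+ # the corresponding math.* function for plain Python numbers, e.g.
+ #   >>> sym_sqrt(4.0)
+ #   2.0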
524
+
525
+
526
+ def sym_ite(b, t, f):
527
+ from .overrides import has_torch_function, handle_torch_function
528
+
529
+ if has_torch_function((b, t, f)):
530
+ return handle_torch_function(sym_ite, (b, t, f), b, t, f)
531
+ assert isinstance(b, (SymBool, builtins.bool)) and type(t) == type(f)
532
+ if isinstance(b, SymBool):
533
+ return b.__sym_ite__(t, f)
534
+ return t if b else f
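+ # Rough usage sketch for sym_ite (illustrative only): with a plain bool it is
+ # just a ternary; with a SymBool the branch selection is recorded symbolically:
+ #   >>> sym_ite(True, 1, 2)
+ #   1
+ #   >>> sym_ite(False, 1, 2)
+ #   2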
535
+
536
+ # Check to see if we can load C extensions, and if not provide some guidance
537
+ # on what the problem might be.
538
+ try:
539
+ # _initExtension is chosen (arbitrarily) as a sentinel.
540
+ from torch._C import _initExtension
541
+ except ImportError:
542
+ import torch._C as _C_for_compiled_check
543
+
544
+ # The __file__ check only works for Python 3.7 and above.
545
+ if _C_for_compiled_check.__file__ is None:
546
+ raise ImportError(textwrap.dedent('''
547
+ Failed to load PyTorch C extensions:
548
+ It appears that PyTorch has loaded the `torch/_C` folder
549
+ of the PyTorch repository rather than the C extensions which
550
+ are expected in the `torch._C` namespace. This can occur when
551
+ using the `install` workflow. e.g.
552
+ $ python setup.py install && python -c "import torch"
553
+
554
+ This error can generally be solved using the `develop` workflow
555
+ $ python setup.py develop && python -c "import torch" # This should succeed
556
+ or by running Python from a different directory.
557
+ ''').strip()) from None
558
+ raise # If __file__ is not None the cause is unknown, so just re-raise.
559
+
560
+ for name in dir(_C):
561
+ if name[0] != '_' and not name.endswith('Base'):
562
+ __all__.append(name)
563
+ obj = getattr(_C, name)
564
+ if (isinstance(obj, Callable) or inspect.isclass(obj)): # type: ignore[arg-type]
565
+ if (obj.__module__ != 'torch'):
566
+ # TODO: fix their module from C++ side
567
+ if name not in ['DisableTorchFunctionSubclass', 'DisableTorchFunction', 'Generator']:
568
+ obj.__module__ = 'torch'
569
+ elif name == 'TensorBase':
570
+ # issue 109438 / pr 109940. Prevent TensorBase from being copied into torch.
571
+ delattr(sys.modules[__name__], name)
572
+
573
+ if not TYPE_CHECKING:
574
+ # issue 38137 and python issue 43367. Submodules of a C extension are
575
+ # non-standard, and attributes of those submodules cannot be pickled since
576
+ # pickle expects to be able to import them as "from _C.sub import attr",
577
+ # which fails with "_C is not a package
578
+ for attr in dir(_C):
579
+ candidate = getattr(_C, attr)
580
+ if type(candidate) is type(_C):
581
+ # submodule
582
+ if f'torch._C.{attr}' not in sys.modules:
583
+ sys.modules[f'torch._C.{attr}'] = candidate
584
+
585
+
586
+ ################################################################################
587
+ # Define basic utilities
588
+ ################################################################################
589
+
590
+
591
+ def typename(o):
592
+ if isinstance(o, torch.Tensor):
593
+ return o.type()
594
+
595
+ module = ''
596
+ class_name = ''
597
+ if hasattr(o, '__module__') and o.__module__ != 'builtins' \
598
+ and o.__module__ != '__builtin__' and o.__module__ is not None:
599
+ module = o.__module__ + '.'
600
+
601
+ if hasattr(o, '__qualname__'):
602
+ class_name = o.__qualname__
603
+ elif hasattr(o, '__name__'):
604
+ class_name = o.__name__
605
+ else:
606
+ class_name = o.__class__.__name__
607
+
608
+ return module + class_name
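+ # Rough usage sketch for typename (illustrative only):
+ #   >>> typename(torch.tensor([1.0]))   # tensors report their legacy type name
+ #   'torch.FloatTensor'
+ #   >>> typename(torch.nn.Linear)       # classes report module + qualname
+ #   'torch.nn.modules.linear.Linear'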
609
+
610
+
611
+ def is_tensor(obj):
612
+ r"""Returns True if `obj` is a PyTorch tensor.
613
+
614
+ Note that this function is simply doing ``isinstance(obj, Tensor)``.
615
+ Using that ``isinstance`` check is better for typechecking with mypy,
616
+ and more explicit - so it's recommended to use that instead of
617
+ ``is_tensor``.
618
+
619
+ Args:
620
+ obj (Object): Object to test
621
+ Example::
622
+
623
+ >>> x = torch.tensor([1, 2, 3])
624
+ >>> torch.is_tensor(x)
625
+ True
626
+
627
+ """
628
+ return isinstance(obj, torch.Tensor)
629
+
630
+
631
+ def is_storage(obj):
632
+ r"""Returns True if `obj` is a PyTorch storage object.
633
+
634
+ Args:
635
+ obj (Object): Object to test
636
+ """
637
+ return type(obj) in _storage_classes
638
+
639
+
640
+ _GLOBAL_DEVICE_CONTEXT = threading.local()
641
+
642
+
643
+ def get_default_device() -> "torch.device":
644
+ r"""Gets the default ``torch.Tensor`` to be allocated on ``device``"""
645
+ global _GLOBAL_DEVICE_CONTEXT
646
+ if hasattr(_GLOBAL_DEVICE_CONTEXT, "device_context"):
647
+ device = _GLOBAL_DEVICE_CONTEXT.device_context.device
648
+ if device.index is not None:
649
+ return device
650
+ else:
651
+ # TODO: Call like get_device_index() method corresponding to
652
+ # each device type
653
+ return torch.tensor([]).device
654
+ else:
655
+ return torch.device("cpu")
656
+
657
+
658
+ def set_default_device(device):
659
+ """Sets the default ``torch.Tensor`` to be allocated on ``device``. This
660
+ does not affect factory function calls which are called with an explicit
661
+ ``device`` argument. Factory calls will be performed as if they
662
+ were passed ``device`` as an argument.
663
+
664
+ To only temporarily change the default device instead of setting it
665
+ globally, use ``with torch.device(device):`` instead.
666
+
667
+ The default device is initially ``cpu``. If you set the default tensor
668
+ device to another device (e.g., ``cuda``) without a device index, tensors
669
+ will be allocated on whatever the current device for that device type is,
670
+ even after :func:`torch.cuda.set_device` is called.
671
+
672
+ .. warning::
673
+
674
+ This function imposes a slight performance cost on every Python
675
+ call to the torch API (not just factory functions). If this
676
+ is causing problems for you, please comment on
677
+ https://github.com/pytorch/pytorch/issues/92701
678
+
679
+ .. note::
680
+
681
+ This doesn't affect functions that create tensors that share the same memory as the input, like:
682
+ :func:`torch.from_numpy` and :func:`torch.frombuffer`
683
+
684
+ Args:
685
+ device (device or string): the device to set as default
686
+
687
+ Example::
688
+
689
+ >>> # xdoctest: +SKIP("requires cuda, changes global state")
690
+ >>> torch.get_default_device()
691
+ device(type='cpu')
692
+ >>> torch.set_default_device('cuda') # current device is 0
693
+ >>> torch.get_default_device()
694
+ device(type='cuda', index=0)
695
+ >>> torch.set_default_device('cuda')
696
+ >>> torch.cuda.set_device('cuda:1') # current device is 1
697
+ >>> torch.get_default_device()
698
+ device(type='cuda', index=1)
699
+ >>> torch.set_default_device('cuda:1')
700
+ >>> torch.get_default_device()
701
+ device(type='cuda', index=1)
702
+
703
+ """
704
+ global _GLOBAL_DEVICE_CONTEXT
705
+ if hasattr(_GLOBAL_DEVICE_CONTEXT, "device_context"):
706
+ device_context = _GLOBAL_DEVICE_CONTEXT.device_context
707
+ if device_context is not None:
708
+ device_context.__exit__(None, None, None)
709
+
710
+ if device is None:
711
+ device_context = None
712
+ else:
713
+ from torch.utils._device import DeviceContext
714
+ device_context = DeviceContext(device)
715
+ device_context.__enter__()
716
+ _GLOBAL_DEVICE_CONTEXT.device_context = device_context
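+ # Rough usage sketch (illustrative only): the context-manager form mentioned
+ # in the docstring changes the default device only temporarily:
+ #   >>> with torch.device("meta"):
+ #   ...     torch.empty(3).device
+ #   device(type='meta')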
717
+
718
+
719
+ def set_default_tensor_type(t):
720
+ r"""
721
+ .. warning::
722
+
723
+ This function is deprecated as of PyTorch 2.1; please use :func:`torch.set_default_dtype()` and
724
+ :func:`torch.set_default_device()` as alternatives.
725
+
726
+ Sets the default ``torch.Tensor`` type to floating point tensor type
727
+ ``t``. This type will also be used as default floating point type for
728
+ type inference in :func:`torch.tensor`.
729
+
730
+ The default floating point tensor type is initially ``torch.FloatTensor``.
731
+
732
+ Args:
733
+ t (type or string): the floating point tensor type or its name
734
+
735
+ Example::
736
+
737
+ >>> # xdoctest: +SKIP("Other tests may have changed the default type. Can we reset it?")
738
+ >>> torch.tensor([1.2, 3]).dtype # initial default for floating point is torch.float32
739
+ torch.float32
740
+ >>> torch.set_default_tensor_type(torch.DoubleTensor)
741
+ >>> torch.tensor([1.2, 3]).dtype # a new floating point tensor
742
+ torch.float64
743
+
744
+ """
745
+ if isinstance(t, str):
746
+ t = _import_dotted_name(t)
747
+ _C._set_default_tensor_type(t)
748
+
749
+
750
+ def set_default_dtype(d):
751
+ r"""
752
+
753
+ Sets the default floating point dtype to :attr:`d`. Supports torch.float32
754
+ and torch.float64 as inputs. Other dtypes may be accepted without complaint
755
+ but are not supported and are unlikely to work as expected.
756
+
757
+ When PyTorch is initialized its default floating point dtype is torch.float32,
758
+ and the intent of set_default_dtype(torch.float64) is to facilitate NumPy-like
759
+ type inference. The default floating point dtype is used to:
760
+
761
+ 1. Implicitly determine the default complex dtype. When the default floating point
762
+ type is float32 the default complex dtype is complex64, and when the default
763
+ floating point type is float64 the default complex type is complex128.
764
+ 2. Infer the dtype for tensors constructed using Python floats or complex Python
765
+ numbers. See examples below.
766
+ 3. Determine the result of type promotion between bool and integer tensors and
767
+ Python floats and complex Python numbers.
768
+
769
+ Args:
770
+ d (:class:`torch.dtype`): the floating point dtype to make the default.
771
+ Either torch.float32 or torch.float64.
772
+
773
+ Example:
774
+ >>> # xdoctest: +SKIP("Other tests may have changed the default type. Can we reset it?")
775
+ >>> # initial default for floating point is torch.float32
776
+ >>> # Python floats are interpreted as float32
777
+ >>> torch.tensor([1.2, 3]).dtype
778
+ torch.float32
779
+ >>> # initial default for complex is torch.complex64
780
+ >>> # Complex Python numbers are interpreted as complex64
781
+ >>> torch.tensor([1.2, 3j]).dtype
782
+ torch.complex64
783
+
784
+ >>> torch.set_default_dtype(torch.float64)
785
+
786
+ >>> # Python floats are now interpreted as float64
787
+ >>> torch.tensor([1.2, 3]).dtype # a new floating point tensor
788
+ torch.float64
789
+ >>> # Complex Python numbers are now interpreted as complex128
790
+ >>> torch.tensor([1.2, 3j]).dtype # a new complex tensor
791
+ torch.complex128
792
+
793
+ """
794
+ _C._set_default_dtype(d)
795
+
796
+ def use_deterministic_algorithms(mode: builtins.bool, *, warn_only: builtins.bool = False) -> None:
797
+ r""" Sets whether PyTorch operations must use "deterministic"
798
+ algorithms. That is, algorithms which, given the same input, and when
799
+ run on the same software and hardware, always produce the same output.
800
+ When enabled, operations will use deterministic algorithms when available,
801
+ and if only nondeterministic algorithms are available they will throw a
802
+ :class:`RuntimeError` when called.
803
+
804
+ .. note:: This setting alone is not always enough to make an application
805
+ reproducible. Refer to :ref:`reproducibility` for more information.
806
+
807
+ .. note:: :func:`torch.set_deterministic_debug_mode` offers an alternative
808
+ interface for this feature.
809
+
810
+ The following normally-nondeterministic operations will act
811
+ deterministically when ``mode=True``:
812
+
813
+ * :class:`torch.nn.Conv1d` when called on CUDA tensor
814
+ * :class:`torch.nn.Conv2d` when called on CUDA tensor
815
+ * :class:`torch.nn.Conv3d` when called on CUDA tensor
816
+ * :class:`torch.nn.ConvTranspose1d` when called on CUDA tensor
817
+ * :class:`torch.nn.ConvTranspose2d` when called on CUDA tensor
818
+ * :class:`torch.nn.ConvTranspose3d` when called on CUDA tensor
819
+ * :class:`torch.nn.ReplicationPad2d` when attempting to differentiate a CUDA tensor
820
+ * :func:`torch.bmm` when called on sparse-dense CUDA tensors
821
+ * :func:`torch.Tensor.__getitem__` when attempting to differentiate a CPU tensor
822
+ and the index is a list of tensors
823
+ * :func:`torch.Tensor.index_put` with ``accumulate=False``
824
+ * :func:`torch.Tensor.index_put` with ``accumulate=True`` when called on a CPU
825
+ tensor
826
+ * :func:`torch.Tensor.put_` with ``accumulate=True`` when called on a CPU
827
+ tensor
828
+ * :func:`torch.Tensor.scatter_add_` when called on a CUDA tensor
829
+ * :func:`torch.gather` when called on a CUDA tensor that requires grad
830
+ * :func:`torch.index_add` when called on CUDA tensor
831
+ * :func:`torch.index_select` when attempting to differentiate a CUDA tensor
832
+ * :func:`torch.repeat_interleave` when attempting to differentiate a CUDA tensor
833
+ * :func:`torch.Tensor.index_copy` when called on a CPU or CUDA tensor
834
+ * :func:`torch.Tensor.scatter` when `src` type is Tensor and called on CUDA tensor
835
+ * :func:`torch.Tensor.scatter_reduce` when ``reduce='sum'`` or ``reduce='mean'`` and called on CUDA tensor
836
+
837
+ The following normally-nondeterministic operations will throw a
838
+ :class:`RuntimeError` when ``mode=True``:
839
+
840
+ * :class:`torch.nn.AvgPool3d` when attempting to differentiate a CUDA tensor
841
+ * :class:`torch.nn.AdaptiveAvgPool2d` when attempting to differentiate a CUDA tensor
842
+ * :class:`torch.nn.AdaptiveAvgPool3d` when attempting to differentiate a CUDA tensor
843
+ * :class:`torch.nn.MaxPool3d` when attempting to differentiate a CUDA tensor
844
+ * :class:`torch.nn.AdaptiveMaxPool2d` when attempting to differentiate a CUDA tensor
845
+ * :class:`torch.nn.FractionalMaxPool2d` when attempting to differentiate a CUDA tensor
846
+ * :class:`torch.nn.FractionalMaxPool3d` when attempting to differentiate a CUDA tensor
847
+ * :class:`torch.nn.MaxUnpool1d`
848
+ * :class:`torch.nn.MaxUnpool2d`
849
+ * :class:`torch.nn.MaxUnpool3d`
850
+ * :func:`torch.nn.functional.interpolate` when attempting to differentiate a CUDA tensor
851
+ and one of the following modes is used:
852
+
853
+ - ``linear``
854
+ - ``bilinear``
855
+ - ``bicubic``
856
+ - ``trilinear``
857
+
858
+ * :class:`torch.nn.ReflectionPad1d` when attempting to differentiate a CUDA tensor
859
+ * :class:`torch.nn.ReflectionPad2d` when attempting to differentiate a CUDA tensor
860
+ * :class:`torch.nn.ReflectionPad3d` when attempting to differentiate a CUDA tensor
861
+ * :class:`torch.nn.ReplicationPad1d` when attempting to differentiate a CUDA tensor
862
+ * :class:`torch.nn.ReplicationPad3d` when attempting to differentiate a CUDA tensor
863
+ * :class:`torch.nn.NLLLoss` when called on a CUDA tensor
864
+ * :class:`torch.nn.CTCLoss` when attempting to differentiate a CUDA tensor
865
+ * :class:`torch.nn.EmbeddingBag` when attempting to differentiate a CUDA tensor when
866
+ ``mode='max'``
867
+ * :func:`torch.Tensor.put_` when ``accumulate=False``
868
+ * :func:`torch.Tensor.put_` when ``accumulate=True`` and called on a CUDA tensor
869
+ * :func:`torch.histc` when called on a CUDA tensor
870
+ * :func:`torch.bincount` when called on a CUDA tensor and ``weights``
871
+ tensor is given
872
+ * :func:`torch.kthvalue` when called on a CUDA tensor
873
+ * :func:`torch.median` with indices output when called on a CUDA tensor
874
+ * :func:`torch.nn.functional.grid_sample` when attempting to differentiate a CUDA tensor
875
+ * :func:`torch.cumsum` when called on a CUDA tensor when dtype is floating point or complex
876
+ * :func:`torch.Tensor.scatter_reduce` when ``reduce='prod'`` and called on CUDA tensor
877
+ * :func:`torch.Tensor.resize_` when called with a quantized tensor
878
+
879
+ In addition, several operations fill uninitialized memory when this setting
880
+ is turned on and when
881
+ :attr:`torch.utils.deterministic.fill_uninitialized_memory` is turned on.
882
+ See the documentation for that attribute for more information.
883
+
884
+ A handful of CUDA operations are nondeterministic if the CUDA version is
885
+ 10.2 or greater, unless the environment variable ``CUBLAS_WORKSPACE_CONFIG=:4096:8``
886
+ or ``CUBLAS_WORKSPACE_CONFIG=:16:8`` is set. See the CUDA documentation for more
887
+ details: `<https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility>`_
888
+ If one of these environment variable configurations is not set, a :class:`RuntimeError`
889
+ will be raised from these operations when called with CUDA tensors:
890
+
891
+ * :func:`torch.mm`
892
+ * :func:`torch.mv`
893
+ * :func:`torch.bmm`
894
+
895
+ Note that deterministic operations tend to have worse performance than
896
+ nondeterministic operations.
897
+
898
+ .. note::
899
+
900
+ This flag does not detect or prevent nondeterministic behavior caused
901
+ by calling an inplace operation on a tensor with an internal memory
902
+ overlap or by giving such a tensor as the :attr:`out` argument for an
903
+ operation. In these cases, multiple writes of different data may target
904
+ a single memory location, and the order of writes is not guaranteed.
905
+
906
+ Args:
907
+ mode (:class:`bool`): If True, makes potentially nondeterministic
908
+ operations switch to a deterministic algorithm or throw a runtime
909
+ error. If False, allows nondeterministic operations.
910
+
911
+ Keyword args:
912
+ warn_only (:class:`bool`, optional): If True, operations that do not
913
+ have a deterministic implementation will throw a warning instead of
914
+ an error. Default: ``False``
915
+
916
+ Example::
917
+
918
+ >>> # xdoctest: +SKIP
919
+ >>> torch.use_deterministic_algorithms(True)
920
+
921
+ # Forward mode nondeterministic error
922
+ >>> torch.randn(10, device='cuda').kthvalue(1)
923
+ ...
924
+ RuntimeError: kthvalue CUDA does not have a deterministic implementation...
925
+
926
+ # Backward mode nondeterministic error
927
+ >>> torch.nn.AvgPool3d(1)(torch.randn(3, 4, 5, 6, requires_grad=True).cuda()).sum().backward()
928
+ ...
929
+ RuntimeError: avg_pool3d_backward_cuda does not have a deterministic implementation...
930
+ """
931
+ _C._set_deterministic_algorithms(mode, warn_only=warn_only)
932
+
933
+ def are_deterministic_algorithms_enabled() -> builtins.bool:
934
+ r"""Returns True if the global deterministic flag is turned on. Refer to
935
+ :func:`torch.use_deterministic_algorithms` documentation for more details.
936
+ """
937
+ return _C._get_deterministic_algorithms()
938
+
939
+ def is_deterministic_algorithms_warn_only_enabled() -> builtins.bool:
940
+ r"""Returns True if the global deterministic flag is set to warn only.
941
+ Refer to :func:`torch.use_deterministic_algorithms` documentation for more
942
+ details.
943
+ """
944
+ return _C._get_deterministic_algorithms_warn_only()
945
+
946
+ def set_deterministic_debug_mode(debug_mode: Union[builtins.int, str]) -> None:
947
+ r"""Sets the debug mode for deterministic operations.
948
+
949
+ .. note:: This is an alternative interface for
950
+ :func:`torch.use_deterministic_algorithms`. Refer to that function's
951
+ documentation for details about affected operations.
952
+
953
+ Args:
954
+ debug_mode(str or int): If "default" or 0, don't error or warn on
955
+ nondeterministic operations. If "warn" or 1, warn on
956
+ nondeterministic operations. If "error" or 2, error on
957
+ nondeterministic operations.
958
+ """
959
+
960
+ # NOTE: builtins.int is used here because int in this scope resolves
961
+ # to torch.int
962
+ if not isinstance(debug_mode, (builtins.int, str)):
963
+ raise TypeError(f'debug_mode must be str or int, but got {type(debug_mode)}')
964
+
965
+ if isinstance(debug_mode, str):
966
+ if debug_mode == 'default':
967
+ debug_mode = 0
968
+ elif debug_mode == 'warn':
969
+ debug_mode = 1
970
+ elif debug_mode == 'error':
971
+ debug_mode = 2
972
+ else:
973
+ raise RuntimeError(
974
+ 'invalid value of debug_mode, expected one of `default`, '
975
+ f'`warn`, `error`, but got {debug_mode}')
976
+
977
+ if debug_mode == 0:
978
+ _C._set_deterministic_algorithms(False)
979
+ elif debug_mode == 1:
980
+ _C._set_deterministic_algorithms(True, warn_only=True)
981
+ elif debug_mode == 2:
982
+ _C._set_deterministic_algorithms(True)
983
+ else:
984
+ raise RuntimeError(
985
+ 'invalid value of debug_mode, expected 0, 1, or 2, '
986
+ f'but got {debug_mode}')
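+ # Rough usage sketch (illustrative only): the string and integer forms are
+ # interchangeable, and "warn" is the same as
+ # use_deterministic_algorithms(True, warn_only=True):
+ #   >>> torch.set_deterministic_debug_mode("warn")
+ #   >>> torch.get_deterministic_debug_mode()
+ #   1
+ #   >>> torch.set_deterministic_debug_mode(0)   # back to the default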
987
+
988
+ def get_deterministic_debug_mode() -> builtins.int:
989
+ r"""Returns the current value of the debug mode for deterministic
990
+ operations. Refer to :func:`torch.set_deterministic_debug_mode`
991
+ documentation for more details.
992
+ """
993
+
994
+ if _C._get_deterministic_algorithms():
995
+ if _C._get_deterministic_algorithms_warn_only():
996
+ return 1
997
+ else:
998
+ return 2
999
+ else:
1000
+ return 0
1001
+
1002
+ def get_float32_matmul_precision() -> builtins.str:
1003
+ r"""Returns the current value of float32 matrix multiplication precision. Refer to
1004
+ :func:`torch.set_float32_matmul_precision` documentation for more details.
1005
+ """
1006
+ return _C._get_float32_matmul_precision()
1007
+
1008
+ def set_float32_matmul_precision(precision: str) -> None:
1009
+ r"""Sets the internal precision of float32 matrix multiplications.
1010
+
1011
+ Running float32 matrix multiplications in lower precision may significantly increase
1012
+ performance, and in some programs the loss of precision has a negligible impact.
1013
+
1014
+ Supports three settings:
1015
+
1016
+ * "highest", float32 matrix multiplications use the float32 datatype (24 mantissa
1017
+ bits with 23 bits explicitly stored) for internal computations.
1018
+ * "high", float32 matrix multiplications either use the TensorFloat32 datatype (10
1019
+ mantissa bits explicitly stored) or treat each float32 number as the sum of two bfloat16 numbers
1020
+ (approximately 16 mantissa bits with 14 bits explicitly stored), if the appropriate fast matrix multiplication
1021
+ algorithms are available. Otherwise float32 matrix multiplications are computed
1022
+ as if the precision is "highest". See below for more information on the bfloat16
1023
+ approach.
1024
+ * "medium", float32 matrix multiplications use the bfloat16 datatype (8 mantissa
1025
+ bits with 7 bits explicitly stored) for internal computations, if a fast matrix multiplication algorithm
1026
+ using that datatype internally is available. Otherwise float32
1027
+ matrix multiplications are computed as if the precision is "high".
1028
+
1029
+ When using "high" precision, float32 multiplications may use a bfloat16-based algorithm
1030
+ that is more complicated than simply truncating to some smaller number of mantissa bits
1031
+ (e.g. 10 for TensorFloat32, 7 for bfloat16 explicitly stored). Refer to [Henry2019]_ for a complete
1032
+ description of this algorithm. To briefly explain here, the first step is to realize
1033
+ that we can perfectly encode a single float32 number as the sum of three bfloat16
1034
+ numbers (because float32 has 23 mantissa bits while bfloat16 has 7 explicitly stored, and both have the
1035
+ same number of exponent bits). This means that the product of two float32 numbers can
1036
+ be exactly given by the sum of nine products of bfloat16 numbers. We can then trade
1037
+ accuracy for speed by dropping some of these products. The "high" precision algorithm
1038
+ specifically keeps only the three most significant products, which conveniently excludes
1039
+ all of the products involving the last 8 mantissa bits of either input. This means that
1040
+ we can represent our inputs as the sum of two bfloat16 numbers rather than three.
1041
+ Because bfloat16 fused-multiply-add (FMA) instructions are typically >10x faster than
1042
+ float32 ones, it's faster to do three multiplications and two additions with bfloat16
1043
+ precision than it is to do a single multiplication with float32 precision.
1044
+
1045
+ .. [Henry2019] http://arxiv.org/abs/1904.06376
1046
+
1047
+ .. note::
1048
+
1049
+ This does not change the output dtype of float32 matrix multiplications,
1050
+ it controls how the internal computation of the matrix multiplication is performed.
1051
+
1052
+ .. note::
1053
+
1054
+ This does not change the precision of convolution operations. Other flags,
1055
+ like `torch.backends.cudnn.allow_tf32`, may control the precision of convolution
1056
+ operations.
1057
+
1058
+ .. note::
1059
+
1060
+ This flag currently only affects one native device type: CUDA.
1061
+ If "high" or "medium" are set then the TensorFloat32 datatype will be used
1062
+ when computing float32 matrix multiplications, equivalent to setting
1063
+ `torch.backends.cuda.matmul.allow_tf32 = True`. When "highest" (the default)
1064
+ is set then the float32 datatype is used for internal computations, equivalent
1065
+ to setting `torch.backends.cuda.matmul.allow_tf32 = False`.
1066
+
1067
+ Args:
1068
+ precision(str): can be set to "highest" (default), "high", or "medium" (see above).
1069
+
1070
+ """
1071
+ _C._set_float32_matmul_precision(precision)
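+ # Rough usage sketch (illustrative only; changes process-global state):
+ #   >>> torch.set_float32_matmul_precision("high")     # allow TF32/bf16-based matmuls
+ #   >>> torch.get_float32_matmul_precision()
+ #   'high'
+ #   >>> torch.set_float32_matmul_precision("highest")  # restore the default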
1072
+
1073
+ def set_warn_always(b: builtins.bool) -> None:
1074
+ r"""When this flag is False (default) then some PyTorch warnings may only
1075
+ appear once per process. This helps avoid excessive warning information.
1076
+ Setting it to True causes these warnings to always appear, which may be
1077
+ helpful when debugging.
1078
+
1079
+ Args:
1080
+ b (:class:`bool`): If True, force warnings to always be emitted.
1081
+ If False, restore the default behaviour.
1082
+ """
1083
+ _C._set_warnAlways(b)
1084
+
1085
+ def is_warn_always_enabled() -> builtins.bool:
1086
+ r"""Returns True if the global warn_always flag is turned on. Refer to
1087
+ :func:`torch.set_warn_always` documentation for more details.
1088
+ """
1089
+ return _C._get_warnAlways()
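+ # Rough usage sketch (illustrative only):
+ #   >>> torch.set_warn_always(True)   # useful when tracking down warning sources
+ #   >>> torch.is_warn_always_enabled()
+ #   True
+ #   >>> torch.set_warn_always(False)  # restore the default behaviour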
1090
+
1091
+ ################################################################################
1092
+ # Define error checking functions
1093
+ ################################################################################
1094
+
1095
+ # These error checking functions must be kept consistent with their C++
1096
+ # equivalents. Their C++ equivalents are mentioned where applicable.
1097
+
1098
+ def _check_with(error_type, cond: Union[builtins.bool, SymBool], message: Callable[[], str]): # noqa: F811
1099
+ if not isinstance(cond, (builtins.bool, torch.SymBool)):
1100
+ raise TypeError(f'cond must be a bool, but got {type(cond)}')
1101
+
1102
+ from torch.fx.experimental.symbolic_shapes import expect_true
1103
+ if expect_true(cond):
1104
+ return
1105
+
1106
+ # error_type must be a subclass of Exception and not subclass of Warning
1107
+ assert issubclass(error_type, Exception) and not issubclass(error_type, Warning)
1108
+
1109
+ if message is None:
1110
+ message_evaluated = (
1111
+ 'Expected cond to be True, but got False. (Could this error '
1112
+ 'message be improved? If so, please report an enhancement request '
1113
+ 'to PyTorch.)')
1114
+
1115
+ else:
1116
+ if not callable(message):
1117
+ raise TypeError('message must be a callable')
1118
+
1119
+ message_evaluated = str(message())
1120
+
1121
+ raise error_type(message_evaluated)
1122
+
1123
+ def _check(cond, message=None): # noqa: F811
1124
+ r"""Throws error containing an optional message if the specified condition
1125
+ is False.
1126
+
1127
+ Error type: ``RuntimeError``
1128
+
1129
+ C++ equivalent: ``TORCH_CHECK``
1130
+
1131
+ Args:
1132
+ cond (:class:`bool`): If False, throw error
1133
+
1134
+ message (Callable, optional): Callable that returns either a string or
1135
+ an object that has a ``__str__()`` method to be used as the error
1136
+ message. Default: ``None``
1137
+ """
1138
+ _check_with(RuntimeError, cond, message)
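+ # Rough usage sketch (illustrative only): the message is a callable so it is
+ # only evaluated when the check actually fails:
+ #   >>> torch._check(2 + 2 == 4)                       # passes silently
+ #   >>> torch._check(False, lambda: "shape mismatch")  # raises RuntimeError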
1139
+
1140
+ def _check_is_size(i, message=None):
1141
+ """Checks that a given integer is a valid size (i.e., is non-negative).
1142
+ You should use this over _check(i >= 0) because we can use the semantic
1143
+ information (that i is a size) to make some further inferences in case
1144
+ i is an unbacked SymInt.
1145
+
1146
+ NB: Do NOT use this in contexts where a -1 size would be valid (indicating
1147
+ to infer the size from context, or if you should wrap-around or truncate).
1148
+ Only use this if the only valid value is an honest to goodness size.
1149
+ """
1150
+ # This is responsible for the expect_true
1151
+ _check(i >= 0, message)
1152
+ from torch.fx.experimental.symbolic_shapes import _advise_is_size
1153
+ _advise_is_size(i)
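+ # Rough usage sketch (illustrative only): behaves like _check(i >= 0) but also
+ # hints to the symbolic-shapes machinery that i is a size:
+ #   >>> torch._check_is_size(4)    # passes
+ #   >>> torch._check_is_size(-1)   # raises RuntimeError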
1154
+
1155
+ def _check_index(cond, message=None): # noqa: F811
1156
+ r"""Throws error containing an optional message if the specified condition
1157
+ is False.
1158
+
1159
+ Error type: ``IndexError``
1160
+
1161
+ C++ equivalent: ``TORCH_CHECK_INDEX``
1162
+
1163
+ Args:
1164
+ cond (:class:`bool`): If False, throw error
1165
+
1166
+ message (Callable, optional): Callable that returns either a string or
1167
+ an object that has a ``__str__()`` method to be used as the error
1168
+ message. Default: ``None``
1169
+ """
1170
+ _check_with(IndexError, cond, message)
1171
+
1172
+ def _check_value(cond, message=None): # noqa: F811
1173
+ r"""Throws error containing an optional message if the specified condition
1174
+ is False.
1175
+
1176
+ Error type: ``ValueError``
1177
+
1178
+ C++ equivalent: ``TORCH_CHECK_VALUE``
1179
+
1180
+ Args:
1181
+ cond (:class:`bool`): If False, throw error
1182
+
1183
+ message (Callable, optional): Callable that returns either a string or
1184
+ an object that has a ``__str__()`` method to be used as the error
1185
+ message. Default: ``None``
1186
+ """
1187
+ _check_with(ValueError, cond, message)
1188
+
1189
+ def _check_type(cond, message=None): # noqa: F811
1190
+ r"""Throws error containing an optional message if the specified condition
1191
+ is False.
1192
+
1193
+ Error type: ``TypeError``
1194
+
1195
+ C++ equivalent: ``TORCH_CHECK_TYPE``
1196
+
1197
+ Args:
1198
+ cond (:class:`bool`): If False, throw error
1199
+
1200
+ message (Callable, optional): Callable that returns either a string or
1201
+ an object that has a ``__str__()`` method to be used as the error
1202
+ message. Default: ``None``
1203
+ """
1204
+ _check_with(TypeError, cond, message)
1205
+
1206
+ def _check_not_implemented(cond, message=None): # noqa: F811
1207
+ r"""Throws error containing an optional message if the specified condition
1208
+ is False.
1209
+
1210
+ Error type: ``NotImplementedError``
1211
+
1212
+ C++ equivalent: ``TORCH_CHECK_NOT_IMPLEMENTED``
1213
+
1214
+ Args:
1215
+ cond (:class:`bool`): If False, throw error
1216
+
1217
+ message (Callable, optional): Callable that returns either a string or
1218
+ an object that has a ``__str__()`` method to be used as the error
1219
+ message. Default: ``None``
1220
+ """
1221
+ _check_with(NotImplementedError, cond, message)
1222
+
1223
+ def _check_tensor_all_with(error_type, cond, message=None): # noqa: F811
1224
+ if not torch.is_tensor(cond):
1225
+ raise TypeError(f'cond must be a tensor, but got {type(cond)}')
1226
+
1227
+ if not cond.dtype == torch.bool:
1228
+ raise TypeError(
1229
+ f'cond tensor must have dtype torch.bool, but got {cond.dtype}')
1230
+
1231
+ _check_with(error_type, cond._is_all_true().item(), message)
1232
+
1233
+ # C++ equivalent: `TORCH_CHECK_TENSOR_ALL`
1234
+ def _check_tensor_all(cond, message=None): # noqa: F811
1235
+ r"""Throws error containing an optional message if the specified condition
1236
+ is False.
1237
+
1238
+ Error type: ``RuntimeError``
1239
+
1240
+ C++ equivalent: ``TORCH_CHECK_TENSOR_ALL``
1241
+
1242
+ Args:
1243
+ cond (:class:`torch.Tensor`): Tensor of dtype ``torch.bool``. If any
1244
+ element is ``False``, throw error
1245
+
1246
+ message (Callable, optional): Callable that returns either a string or
1247
+ an object that has a ``__str__()`` method to be used as the error
1248
+ message. Default: ``None``
1249
+ """
1250
+ _check_tensor_all_with(RuntimeError, cond, message)
1251
+
1252
+ ################################################################################
1253
+ # Define numeric constants
1254
+ ################################################################################
1255
+
1256
+ # For Python Array API (https://data-apis.org/array-api/latest/API_specification/constants.html) and
1257
+ # NumPy consistency (https://numpy.org/devdocs/reference/constants.html)
1258
+ from math import e , nan , inf , pi
1259
+ __all__.extend(['e', 'pi', 'nan', 'inf'])
1260
+
1261
+ ################################################################################
1262
+ # Define Storage and Tensor classes
1263
+ ################################################################################
1264
+
1265
+ from ._tensor import Tensor
1266
+ from .storage import _StorageBase, TypedStorage, _LegacyStorage, UntypedStorage, _warn_typed_storage_removal
1267
+
1268
+ # NOTE: New <type>Storage classes should never be added. When adding a new
1269
+ # dtype, use torch.storage.TypedStorage directly.
1270
+
1271
+ class ByteStorage(_LegacyStorage):
1272
+ @classproperty
1273
+ def dtype(self):
1274
+ _warn_typed_storage_removal(stacklevel=3)
1275
+ return self._dtype
1276
+
1277
+ @classproperty
1278
+ def _dtype(self):
1279
+ return torch.uint8
1280
+
1281
+ class DoubleStorage(_LegacyStorage):
1282
+ @classproperty
1283
+ def dtype(self):
1284
+ _warn_typed_storage_removal(stacklevel=3)
1285
+ return self._dtype
1286
+
1287
+ @classproperty
1288
+ def _dtype(self):
1289
+ return torch.double
1290
+
1291
+ class FloatStorage(_LegacyStorage):
1292
+ @classproperty
1293
+ def dtype(self):
1294
+ _warn_typed_storage_removal(stacklevel=3)
1295
+ return self._dtype
1296
+
1297
+ @classproperty
1298
+ def _dtype(self):
1299
+ return torch.float
1300
+
1301
+ class HalfStorage(_LegacyStorage):
1302
+ @classproperty
1303
+ def dtype(self):
1304
+ _warn_typed_storage_removal(stacklevel=3)
1305
+ return self._dtype
1306
+
1307
+ @classproperty
1308
+ def _dtype(self):
1309
+ return torch.half
1310
+
1311
+ class LongStorage(_LegacyStorage):
1312
+ @classproperty
1313
+ def dtype(self):
1314
+ _warn_typed_storage_removal(stacklevel=3)
1315
+ return self._dtype
1316
+
1317
+ @classproperty
1318
+ def _dtype(self):
1319
+ return torch.long
1320
+
1321
+ class IntStorage(_LegacyStorage):
1322
+ @classproperty
1323
+ def dtype(self):
1324
+ _warn_typed_storage_removal(stacklevel=3)
1325
+ return self._dtype
1326
+
1327
+ @classproperty
1328
+ def _dtype(self):
1329
+ return torch.int
1330
+
1331
+ class ShortStorage(_LegacyStorage):
1332
+ @classproperty
1333
+ def dtype(self):
1334
+ _warn_typed_storage_removal(stacklevel=3)
1335
+ return self._dtype
1336
+
1337
+ @classproperty
1338
+ def _dtype(self):
1339
+ return torch.short
1340
+
1341
+ class CharStorage(_LegacyStorage):
1342
+ @classproperty
1343
+ def dtype(self):
1344
+ _warn_typed_storage_removal(stacklevel=3)
1345
+ return self._dtype
1346
+
1347
+ @classproperty
1348
+ def _dtype(self):
1349
+ return torch.int8
1350
+
1351
+ class BoolStorage(_LegacyStorage):
1352
+ @classproperty
1353
+ def dtype(self):
1354
+ _warn_typed_storage_removal(stacklevel=3)
1355
+ return self._dtype
1356
+
1357
+ @classproperty
1358
+ def _dtype(self):
1359
+ return torch.bool
1360
+
1361
+ class BFloat16Storage(_LegacyStorage):
1362
+ @classproperty
1363
+ def dtype(self):
1364
+ _warn_typed_storage_removal(stacklevel=3)
1365
+ return self._dtype
1366
+
1367
+ @classproperty
1368
+ def _dtype(self):
1369
+ return torch.bfloat16
1370
+
1371
+ class ComplexDoubleStorage(_LegacyStorage):
1372
+ @classproperty
1373
+ def dtype(self):
1374
+ _warn_typed_storage_removal(stacklevel=3)
1375
+ return self._dtype
1376
+
1377
+ @classproperty
1378
+ def _dtype(self):
1379
+ return torch.cdouble
1380
+
1381
+ class ComplexFloatStorage(_LegacyStorage):
1382
+ @classproperty
1383
+ def dtype(self):
1384
+ _warn_typed_storage_removal(stacklevel=3)
1385
+ return self._dtype
1386
+
1387
+ @classproperty
1388
+ def _dtype(self):
1389
+ return torch.cfloat
1390
+
1391
+ class QUInt8Storage(_LegacyStorage):
1392
+ @classproperty
1393
+ def dtype(self):
1394
+ _warn_typed_storage_removal(stacklevel=3)
1395
+ return self._dtype
1396
+
1397
+ @classproperty
1398
+ def _dtype(self):
1399
+ return torch.quint8
1400
+
1401
+ class QInt8Storage(_LegacyStorage):
1402
+ @classproperty
1403
+ def dtype(self):
1404
+ _warn_typed_storage_removal(stacklevel=3)
1405
+ return self._dtype
1406
+
1407
+ @classproperty
1408
+ def _dtype(self):
1409
+ return torch.qint8
1410
+
1411
+ class QInt32Storage(_LegacyStorage):
1412
+ @classproperty
1413
+ def dtype(self):
1414
+ _warn_typed_storage_removal(stacklevel=3)
1415
+ return self._dtype
1416
+
1417
+ @classproperty
1418
+ def _dtype(self):
1419
+ return torch.qint32
1420
+
1421
+ class QUInt4x2Storage(_LegacyStorage):
1422
+ @classproperty
1423
+ def dtype(self):
1424
+ _warn_typed_storage_removal(stacklevel=3)
1425
+ return self._dtype
1426
+
1427
+ @classproperty
1428
+ def _dtype(self):
1429
+ return torch.quint4x2
1430
+
1431
+ class QUInt2x4Storage(_LegacyStorage):
1432
+ @classproperty
1433
+ def dtype(self):
1434
+ _warn_typed_storage_removal(stacklevel=3)
1435
+ return self._dtype
1436
+
1437
+ @classproperty
1438
+ def _dtype(self):
1439
+ return torch.quint2x4
1440
+
1441
+ _storage_classes = {
1442
+ UntypedStorage, DoubleStorage, FloatStorage, LongStorage, IntStorage,
1443
+ ShortStorage, CharStorage, ByteStorage, HalfStorage, BoolStorage,
1444
+ QUInt8Storage, QInt8Storage, QInt32Storage, BFloat16Storage,
1445
+ ComplexFloatStorage, ComplexDoubleStorage, QUInt4x2Storage, QUInt2x4Storage,
1446
+ TypedStorage
1447
+ }
1448
+
1449
+ # The _tensor_classes set is initialized by the call to initialize_python_bindings.
1450
+ _tensor_classes: Set[Type] = set()
1451
+
1452
+ # If you edit these imports, please update torch/__init__.py.in as well
1453
+ from .random import set_rng_state, get_rng_state, manual_seed, initial_seed, seed
1454
+ from .serialization import save, load
1455
+ from ._tensor_str import set_printoptions
1456
+
1457
+ ################################################################################
1458
+ # Initialize extension
1459
+ ################################################################################
1460
+
1461
+ def manager_path():
1462
+ if _running_with_deploy() or platform.system() == 'Windows':
1463
+ return b""
1464
+ path = get_file_path('torch', 'bin', 'torch_shm_manager')
1465
+ prepare_multiprocessing_environment(get_file_path('torch'))
1466
+ if not os.path.exists(path):
1467
+ raise RuntimeError("Unable to find torch_shm_manager at " + path)
1468
+ return path.encode('utf-8')
1469
+
1470
+ from torch.amp import autocast, GradScaler
1471
+
1472
+ # Initializing the extension shadows the built-in python float / int classes;
1473
+ # store them for later use by SymInt / SymFloat.
1474
+ py_float = float
1475
+ py_int = int
1476
+
1477
+ # Shared memory manager needs to know the exact location of manager executable
1478
+ _C._initExtension(manager_path())
1479
+ del manager_path
1480
+
1481
+ # Appease the type checker: it can't deal with direct setting of globals().
1482
+ # Note that we will see "too many" functions when reexporting this way; there
1483
+ # is not a good way to fix this problem. Perhaps, try to redesign VariableFunctions
1484
+ # so that this import is good enough
1485
+ if TYPE_CHECKING:
1486
+ # Some type signatures pulled in from _VariableFunctions here clash with
1487
+ # signatures already imported. For now these clashes are ignored; see
1488
+ # PR #43339 for details.
1489
+ from torch._C._VariableFunctions import * # type: ignore[assignment, misc] # noqa: F403
1490
+ # Fixup segment_reduce visibility
1491
+ _segment_reduce = segment_reduce
1492
+ del segment_reduce # noqa: F821
1493
+
1494
+ # Ops not to be exposed in `torch` namespace,
1495
+ # mostly helper ops.
1496
+ PRIVATE_OPS = (
1497
+ 'unique_dim',
1498
+ )
1499
+
1500
+ for name in dir(_C._VariableFunctions):
1501
+ if name.startswith('__') or name in PRIVATE_OPS:
1502
+ continue
1503
+ obj = getattr(_C._VariableFunctions, name)
1504
+ obj.__module__ = 'torch'
1505
+ # Hide some APIs that should not be public
1506
+ if name == "segment_reduce":
1507
+ # TODO: Once the undocumented FC window is passed, remove the line below
1508
+ globals()[name] = obj
1509
+ name = "_" + name
1510
+ globals()[name] = obj
1511
+ if not name.startswith("_"):
1512
+ __all__.append(name)
1513
+
1514
+
1515
+ ################################################################################
1516
+ # Add torch.dtype instances to the public API
1517
+ ################################################################################
1518
+
1519
+ import torch
1520
+
1521
+ for attribute in dir(torch):
1522
+ if isinstance(getattr(torch, attribute), torch.dtype):
1523
+ __all__.append(attribute)
1524
+
1525
+ ################################################################################
1526
+ # Import TorchDynamo's lazy APIs to avoid circular dependencies
1527
+ ################################################################################
1528
+
1529
+ # needs to be before from .functional import * to avoid circular dependencies
1530
+ from ._compile import _disable_dynamo
1531
+
1532
+ ################################################################################
1533
+ # Import interface functions defined in Python
1534
+ ################################################################################
1535
+
1536
+ # needs to be after the above ATen bindings so we can overwrite from Python side
1537
+ from .functional import * # noqa: F403
1538
+
1539
+
1540
+ ################################################################################
1541
+ # Remove unnecessary members
1542
+ ################################################################################
1543
+
1544
+ del _StorageBase
1545
+ del _LegacyStorage
1546
+
1547
+ ################################################################################
1548
+ # Define _assert
1549
+ ################################################################################
1550
+
1551
+ # needs to be before the submodule imports to avoid circular dependencies
1552
+ def _assert(condition, message):
1553
+ r"""A wrapper around Python's assert which is symbolically traceable.
1554
+ """
1555
+ from .overrides import has_torch_function, handle_torch_function
1556
+
1557
+ if type(condition) is not torch.Tensor and has_torch_function((condition,)):
1558
+ return handle_torch_function(_assert, (condition,), condition, message)
1559
+ assert condition, message
1560
+
1561
+ ################################################################################
1562
+ # Import most common subpackages
1563
+ ################################################################################
1564
+
1565
+ # Use the redundant form so that type checkers know that these are a part of
1566
+ # the public API. The "regular" import lines are there solely for the runtime
1567
+ # side effect of adding to the imported module's members for other users.
1568
+ from torch import cuda as cuda
1569
+ from torch import cpu as cpu
1570
+ from torch import mps as mps
1571
+ from torch import xpu as xpu
1572
+ from torch import autograd as autograd
1573
+ from torch.autograd import (
1574
+ no_grad as no_grad,
1575
+ enable_grad as enable_grad,
1576
+ set_grad_enabled as set_grad_enabled,
1577
+ inference_mode as inference_mode,
1578
+ )
1579
+ from torch import fft as fft
1580
+ from torch import futures as futures
1581
+ from torch import _awaits as _awaits
1582
+ from torch import nested as nested
1583
+ from torch import nn as nn
1584
+ from torch.signal import windows as windows
1585
+ from torch import optim as optim
1586
+ import torch.optim._multi_tensor
1587
+ from torch import multiprocessing as multiprocessing
1588
+ from torch import sparse as sparse
1589
+ from torch import special as special
1590
+ import torch.utils.backcompat
1591
+ from torch import jit as jit
1592
+ from torch import linalg as linalg
1593
+ from torch import hub as hub
1594
+ from torch import random as random
1595
+ from torch import distributions as distributions
1596
+ from torch import testing as testing
1597
+ from torch import backends as backends
1598
+ import torch.utils.data
1599
+ from torch import __config__ as __config__
1600
+ from torch import __future__ as __future__
1601
+ from torch import profiler as profiler
1602
+
1603
+ # Quantized, sparse, AO, etc. should be last to get imported, as nothing
1604
+ # is expected to depend on them.
1605
+ from torch import ao as ao
1606
+ # nn.quant* depends on ao -- so should be after those.
1607
+ import torch.nn.quantizable
1608
+ import torch.nn.quantized
1609
+ import torch.nn.qat
1610
+ import torch.nn.intrinsic
1611
+
1612
+ _C._init_names(list(torch._storage_classes))
1613
+
1614
+ # attach docstrings to torch and tensor functions
1615
+ from . import _torch_docs, _tensor_docs, _storage_docs
1616
+ del _torch_docs, _tensor_docs, _storage_docs
1617
+
1618
+
1619
+ def compiled_with_cxx11_abi() -> builtins.bool:
1620
+ r"""Returns whether PyTorch was built with _GLIBCXX_USE_CXX11_ABI=1"""
1621
+ return _C._GLIBCXX_USE_CXX11_ABI
1622
+
1623
+
1624
+ # Import the ops "namespace"
1625
+ from torch._ops import ops
1626
+ from torch._classes import classes
1627
+ import torch._library
1628
+
1629
+ # quantization depends on torch.fx
1630
+ # Import quantization
1631
+ from torch import quantization as quantization
1632
+
1633
+ # Import the quasi random sampler
1634
+ from torch import quasirandom as quasirandom
1635
+
1636
+ # If you are seeing this, it means that this call site was not checked to see
1637
+ # whether the memory format could be preserved, and it was switched to the old
1638
+ # default behaviour of contiguous
1639
+ legacy_contiguous_format = contiguous_format
1640
+
1641
+ # Register fork handler to initialize OpenMP in child processes (see gh-28389)
1642
+ from torch.multiprocessing._atfork import register_after_fork
1643
+ register_after_fork(torch.get_num_threads)
1644
+ del register_after_fork
1645
+
1646
+ # Import tools that require fully imported torch (for applying
1647
+ # torch.jit.script as a decorator, for instance):
1648
+ from ._lobpcg import lobpcg as lobpcg
1649
+
1650
+ # These were previously defined in native_functions.yaml and appeared on the
1651
+ # `torch` namespace, but we moved them to c10 dispatch to facilitate custom
1652
+ # class usage. We add these lines here to preserve backward compatibility.
1653
+ quantized_lstm = torch.ops.aten.quantized_lstm
1654
+ quantized_gru = torch.ops.aten.quantized_gru
1655
+
1656
+ from torch.utils.dlpack import from_dlpack, to_dlpack
1657
+
1658
+ # Import experimental masked operations support. See
1659
+ # [RFC-0016](https://github.com/pytorch/rfcs/pull/27) for more
1660
+ # information.
1661
+ from . import masked
1662
+
1663
+ # Import removed ops with error message about removal
1664
+ from ._linalg_utils import ( # type: ignore[misc]
1665
+ matrix_rank,
1666
+ eig,
1667
+ solve,
1668
+ lstsq,
1669
+ )
1670
+ from ._linalg_utils import _symeig as symeig # type: ignore[misc]
1671
+
1672
+ class _TorchCompileInductorWrapper:
1673
+ compiler_name = "inductor"
1674
+
1675
+ def __init__(self, mode, options, dynamic):
1676
+ self.config: Dict[str, Any] = dict()
1677
+ self.dynamic = dynamic
1678
+ self.apply_mode(mode)
1679
+ self.apply_options(options)
1680
+
1681
+ if self.config.get("triton.cudagraphs", False):
1682
+ os.environ["DISABLE_CUPTI_LAZY_REINIT"] = "1"
1683
+ # FIXME: CUDA Graph does not work well with CUPTI teardown.
1684
+ # 1) crashes on 1st lazy CUPTI re-init after teardown (CUDA 11)
1685
+ # 2) crashes on 2nd non-lazy CUPTI re-init after teardown (CUDA 12)
1686
+ # Workaround: turn off CUPTI teardown when using CUDA Graphs.
1687
+ os.environ["TEARDOWN_CUPTI"] = "0"
1688
+
1689
+ def __eq__(self, other):
1690
+ return (isinstance(other, _TorchCompileInductorWrapper) and
1691
+ self.config == other.config and
1692
+ self.dynamic == other.dynamic)
1693
+
1694
+ def apply_mode(self, mode: Optional[str]):
1695
+ if mode is None or mode == "default":
1696
+ pass
1697
+ elif mode in ("reduce-overhead", "max-autotune", "max-autotune-no-cudagraphs"):
1698
+ from torch._inductor import list_mode_options
1699
+ self.apply_options(list_mode_options(mode, self.dynamic))
1700
+ else:
1701
+ raise RuntimeError(
1702
+ f"Unrecognized mode={mode}, should be one of: default, reduce-overhead, max-autotune, max-autotune-no-cudagraphs"
1703
+ )
1704
+
1705
+ def apply_options(self, options: Optional[Dict[str, Any]]):
1706
+ if not options:
1707
+ return
1708
+
1709
+ from torch._inductor import config
1710
+ current_config: Dict[str, Any] = config.shallow_copy_dict()
1711
+
1712
+ for key, val in options.items():
1713
+ attr_name = key.replace("-", "_")
1714
+ if attr_name not in current_config:
1715
+ raise RuntimeError(
1716
+ f"Unexpected optimization option {key}, known options are {list(current_config.keys())}"
1717
+ )
1718
+ if type(val) is not type(current_config[attr_name]):
1719
+ val_type_str = type(val).__name__
1720
+ expected_type_str = type(current_config[attr_name]).__name__
1721
+ raise RuntimeError(
1722
+ f"Unexpected type of attr {key}, got {val_type_str} should be {expected_type_str}"
1723
+ )
1724
+ self.config[attr_name] = val
1725
+
1726
+ def __call__(self, model_, inputs_):
1727
+ from torch._inductor.compile_fx import compile_fx
1728
+
1729
+ return compile_fx(model_, inputs_, config_patches=self.config)
1730
+
1731
+ def get_compiler_config(self):
1732
+ from torch._inductor.compile_fx import get_patched_config_dict
1733
+ return get_patched_config_dict(config_patches=self.config)
1734
+
1735
+ def reset(self):
1736
+ from torch._inductor import config
1737
+ if "triton.cudagraphs" in self.config or config.triton.cudagraphs:
1738
+ if self.config.get("triton.cudagraphs", True):
1739
+ from torch._inductor.cudagraph_trees import reset_cudagraph_trees
1740
+ reset_cudagraph_trees()
1741
+
1742
+ class _TorchCompileWrapper:
1743
+ def __init__(self, backend, mode, options, dynamic):
1744
+ from torch._dynamo.backends.registry import lookup_backend
1745
+
1746
+ if isinstance(backend, str):
1747
+ self.compiler_name = backend
1748
+ elif hasattr(backend, "__name__"):
1749
+ self.compiler_name = backend.__name__
1750
+ else:
1751
+ self.compiler_name = str(backend)
1752
+ self.dynamic = dynamic
1753
+ self.compiler_fn = lookup_backend(backend)
1754
+ self.kwargs = {}
1755
+ # only pass the args if they are non-empty
1756
+ if mode and mode != "default":
1757
+ self.kwargs["mode"] = mode
1758
+ if options:
1759
+ self.kwargs["options"] = options
1760
+
1761
+ def __eq__(self, other):
1762
+ return (isinstance(other, _TorchCompileWrapper) and
1763
+ self.compiler_fn == other.compiler_fn and
1764
+ self.kwargs == other.kwargs and
1765
+ self.dynamic == other.dynamic)
1766
+
1767
+ def __call__(self, model_, inputs_):
1768
+ return self.compiler_fn(model_, inputs_, **self.kwargs)
1769
+
1770
+ def reset(self):
1771
+ if hasattr(self.compiler_fn, "reset"):
1772
+ self.compiler_fn.reset()
1773
+
1774
+
1775
+ def compile(model: Optional[Callable] = None, *,
1776
+ fullgraph: builtins.bool = False,
1777
+ dynamic: Optional[builtins.bool] = None,
1778
+ backend: Union[str, Callable] = "inductor",
1779
+ mode: Union[str, None] = None,
1780
+ options: Optional[Dict[str, Union[str, builtins.int, builtins.bool]]] = None,
1781
+ disable: builtins.bool = False) -> Callable:
1782
+ """
1783
+ Optimizes given model/function using TorchDynamo and specified backend.
1784
+
1785
+ Concretely, for every frame executed within the compiled region, we will attempt
1786
+ to compile it and cache the compiled result on the code object for future
1787
+ use. A single frame may be compiled multiple times if previous compiled
1788
+ results are not applicable for subsequent calls (this is called a "guard
1789
+ failure"); you can use TORCH_LOGS=guards to debug these situations.
1790
+ Multiple compiled results can be associated with a frame up to
1791
+ ``torch._dynamo.config.cache_size_limit``, which defaults to 64; at which
1792
+ point we will fall back to eager. Note that compile caches are per
1793
+ *code object*, not frame; if you dynamically create multiple copies of a
1794
+ function, they will all share the same code cache.
1795
+
1796
+ Args:
1797
+ model (Callable): Module/function to optimize
1798
+ fullgraph (bool): If False (default), torch.compile attempts to discover compilable regions
1799
+ in the function that it will optimize. If True, then we require that the entire function be
1800
+ capturable into a single graph. If this is not possible (that is, if there are graph breaks),
1801
+ then this will raise an error.
1802
+ dynamic (bool or None): Use dynamic shape tracing. When this is True, we will up-front attempt
1803
+ to generate a kernel that is as dynamic as possible to avoid recompilations when
1804
+ sizes change. This may not always work as some operations/optimizations will
1805
+ force specialization; use TORCH_LOGS=dynamic to debug overspecialization.
1806
+ When this is False, we will NEVER generate dynamic kernels, we will always specialize.
1807
+ By default (None), we automatically detect if dynamism has occurred and compile a more
1808
+ dynamic kernel upon recompile.
1809
+ backend (str or Callable): backend to be used
1810
+
1811
+ - "inductor" is the default backend, which is a good balance between performance and overhead
1812
+
1813
+ - Non-experimental in-tree backends can be seen with `torch._dynamo.list_backends()`
1814
+
1815
+ - Experimental or debug in-tree backends can be seen with `torch._dynamo.list_backends(None)`
1816
+
1817
+ - To register an out-of-tree custom backend: https://pytorch.org/docs/main/compile/custom-backends.html
1818
+ mode (str): Can be either "default", "reduce-overhead", "max-autotune" or "max-autotune-no-cudagraphs"
1819
+
1820
+ - "default" is the default mode, which is a good balance between performance and overhead
1821
+
1822
+ - "reduce-overhead" is a mode that reduces the overhead of python with CUDA graphs,
1823
+ useful for small batches. Reduction of overhead can come at the cost of more memory
1824
+ usage, as we will cache the workspace memory required for the invocation so that we
1825
+ do not have to reallocate it on subsequent runs. Reduction of overhead is not guaranteed
1826
+ to work; today, we only reduce overhead for CUDA only graphs which do not mutate inputs.
1827
+ There are other circumstances where CUDA graphs are not applicable; use TORCH_LOGS=perf_hints
1828
+ to debug.
1829
+
1830
+ - "max-autotune" is a mode that leverages Triton-based matrix multiplications and convolutions.
1831
+ It enables CUDA graphs by default.
1832
+
1833
+ - "max-autotune-no-cudagraphs" is a mode similar to "max-autotune" but without CUDA graphs
1834
+
1835
+ - To see the exact configs that each mode sets you can call `torch._inductor.list_mode_options()`
1836
+
1837
+ options (dict): A dictionary of options to pass to the backend. Some notable ones to try out are
1838
+
1839
+ - `epilogue_fusion` which fuses pointwise ops into templates. Requires `max_autotune` to also be set
1840
+
1841
+ - `max_autotune` which will profile to pick the best matmul configuration
1842
+
1843
+ - `fallback_random` which is useful when debugging accuracy issues
1844
+
1845
+ - `shape_padding` which pads matrix shapes to better align loads on GPUs especially for tensor cores
1846
+
1847
+ - `triton.cudagraphs` which will reduce the overhead of python with CUDA graphs
1848
+
1849
+ - `trace.enabled` which is the most useful debugging flag to turn on
1850
+
1851
+ - `trace.graph_diagram` which will show you a picture of your graph after fusion
1852
+
1853
+ - For inductor you can see the full list of configs that it supports by calling `torch._inductor.list_options()`
1854
+ disable (bool): Turn torch.compile() into a no-op for testing
1855
+
1856
+ Example::
1857
+
1858
+ @torch.compile(options={"triton.cudagraphs": True}, fullgraph=True)
1859
+ def foo(x):
1860
+ return torch.sin(x) + torch.cos(x)
1861
+
1862
+ """
1863
+ _C._log_api_usage_once("torch.compile")
1864
+ # Temporary until we get proper support for python 3.12
1865
+ if sys.version_info >= (3, 12):
1866
+ raise RuntimeError("Dynamo is not supported on Python 3.12+")
1867
+
1868
+ # Decorator mode
1869
+ if model is None:
1870
+ def fn(model: Callable):
1871
+ if model is None:
1872
+ raise RuntimeError("Model can't be None")
1873
+ return compile(model,
1874
+ fullgraph=fullgraph,
1875
+ dynamic=dynamic,
1876
+ backend=backend,
1877
+ mode=mode,
1878
+ options=options,
1879
+ disable=disable)
1880
+ return fn
1881
+
1882
+ if mode is not None and options is not None:
1883
+ raise RuntimeError("Either mode or options can be specified, but both can't be specified at the same time.")
1884
+ if mode is None and options is None:
1885
+ mode = "default"
1886
+ if backend == "inductor":
1887
+ backend = _TorchCompileInductorWrapper(mode, options, dynamic)
1888
+ else:
1889
+ backend = _TorchCompileWrapper(backend, mode, options, dynamic)
1890
+
1891
+ return torch._dynamo.optimize(backend=backend, nopython=fullgraph, dynamic=dynamic, disable=disable)(model)
1892
+
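For reference alongside the decorator example in the docstring above, here is a hedged sketch of the plain call form, showing that ``mode`` and ``options`` are mutually exclusive and that the default ``"inductor"`` backend string is wrapped internally; the ``TinyModel`` module is hypothetical.

```python
import torch


class TinyModel(torch.nn.Module):  # hypothetical example module
    def forward(self, x):
        return torch.relu(x @ x.T)


model = TinyModel()

# Call form: compile an existing module with the default inductor backend.
# Passing both mode= and options= would raise, per the checks above.
compiled = torch.compile(model, mode="reduce-overhead")
out = compiled(torch.randn(8, 8))
```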
1893
+
1894
+ from torch import export as export
1895
+
1896
+ from torch._higher_order_ops import cond
1897
+
1898
+ def _register_device_module(device_type, module):
1899
+ r"""Register an external runtime module of the specific :attr:`device_type`
1900
+ supported by torch.
1901
+
1902
+ After the :attr:`module` is registered correctly, the user can refer to
1903
+ the external runtime module as part of torch with attribute torch.xxx.
1904
+ """
1905
+ # Make sure the device_type represents a supported device type for torch.
1906
+ device_type = torch.device(device_type).type
1907
+ m = sys.modules[__name__]
1908
+ if hasattr(m, device_type):
1909
+ raise RuntimeError(f"The runtime module of '{device_type}' has already "
1910
+ f"been registered with '{getattr(m, device_type)}'")
1911
+ setattr(m, device_type, module)
1912
+ torch_module_name = '.'.join([__name__, device_type])
1913
+ sys.modules[torch_module_name] = module
1914
+
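A minimal sketch of how an out-of-tree backend might use the hook above, assuming the reserved ``privateuseone`` device type; the ``my_backend`` module and its ``is_available`` attribute are hypothetical.

```python
import types

import torch

# Hypothetical runtime module for an out-of-tree accelerator backend.
my_backend = types.ModuleType("my_backend")
my_backend.is_available = lambda: True

# "privateuseone" is the device type PyTorch reserves for external backends.
torch._register_device_module("privateuseone", my_backend)

# The module is now reachable both as torch.privateuseone and via sys.modules.
assert torch.privateuseone.is_available()
```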
1915
+ # expose return_types
1916
+ from . import return_types
1917
+ from . import library
1918
+ if not TYPE_CHECKING:
1919
+ from . import _meta_registrations
1920
+
1921
+ # Enable CUDA Sanitizer
1922
+ if 'TORCH_CUDA_SANITIZER' in os.environ:
1923
+ import torch.cuda._sanitizer as csan
1924
+
1925
+ csan.enable_cuda_sanitizer()
1926
+
1927
+ # Populate magic methods on SymInt and SymFloat
1928
+ import torch.fx.experimental.sym_node
1929
+
1930
+ from torch import func as func
1931
+ from torch.func import vmap
1932
+
1933
+
1934
+ # The function _sparse_coo_tensor_unsafe is removed from PyTorch
1935
+ # Python API (v. 1.13), here we temporarily provide its replacement
1936
+ # with a deprecation warning.
1937
+ # TODO: remove the function for PyTorch v 1.15.
1938
+ def _sparse_coo_tensor_unsafe(*args, **kwargs):
1939
+ import warnings
1940
+ warnings.warn('torch._sparse_coo_tensor_unsafe is deprecated, '
1941
+ 'use torch.sparse_coo_tensor(..., check_invariants=False) instead.')
1942
+ kwargs['check_invariants'] = False
1943
+ return torch.sparse_coo_tensor(*args, **kwargs)
1944
+
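A short sketch of the migration the deprecation warning above points to: the old helper simply forwards to ``torch.sparse_coo_tensor`` with invariant checking disabled.

```python
import torch

indices = torch.tensor([[0, 1], [1, 0]])
values = torch.tensor([3.0, 4.0])

# Deprecated path: warns, then forwards with check_invariants=False.
a = torch._sparse_coo_tensor_unsafe(indices, values, (2, 2))

# Recommended replacement, as the warning message suggests.
b = torch.sparse_coo_tensor(indices, values, (2, 2), check_invariants=False)
```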
1945
+ # Register MPS specific decomps
1946
+ torch.backends.mps._init()
1947
+
1948
+ if not _running_with_deploy():
1949
+ from torch import compiler as compiler
1950
+
1951
+ class _TritonLibrary:
1952
+ lib = torch.library.Library("triton", "DEF")
1953
+ ops_table: Dict[Tuple[str, str], Callable] = {}
1954
+
1955
+ @classmethod
1956
+ def registerOp(cls, op_key, full_schema, op_impl, dispatch_key):
1957
+ if (op_key, dispatch_key) not in cls.ops_table:
1958
+ cls.lib.define(full_schema)
1959
+ cls.lib.impl("triton::" + op_key, op_impl, dispatch_key)
1960
+ cls.ops_table[(op_key, dispatch_key)] = op_impl
1961
+
1962
+ return cls.ops_table[(op_key, dispatch_key)]
1963
+
1964
+
1965
+ # Deprecated attributes
1966
+ _deprecated_attrs = {
1967
+ "has_mps": torch.backends.mps.is_built,
1968
+ "has_cuda": torch.backends.cuda.is_built,
1969
+ "has_cudnn": torch.backends.cudnn.is_available,
1970
+ "has_mkldnn": torch.backends.mkldnn.is_available,
1971
+ }
1972
+
1973
+ if TYPE_CHECKING:
1974
+ # Import the following modules during type checking to enable code intelligence features,
1975
+ # such as auto-completion in tools like pylance, even when these modules are not explicitly
1976
+ # imported in user code.
1977
+ from torch import _dynamo as _dynamo
1978
+ from torch import _inductor as _inductor
1979
+ from torch import onnx as onnx
1980
+
1981
+ else:
1982
+ _lazy_modules = {
1983
+ "_dynamo",
1984
+ "_inductor",
1985
+ "_export",
1986
+ # ONNX must be imported after _dynamo, _ops, _subclasses, fx, func and jit
1987
+ "onnx",
1988
+ }
1989
+
1990
+ def __getattr__(name):
1991
+ # Deprecated attrs
1992
+ replacement = _deprecated_attrs.get(name)
1993
+ if replacement is not None:
1994
+ import warnings
1995
+ warnings.warn(f"'{name}' is deprecated, please use '{replacement.__module__}.{replacement.__name__}()'", stacklevel=2)
1996
+ return replacement()
1997
+
1998
+ # Lazy modules
1999
+ if name in _lazy_modules:
2000
+ import importlib
2001
+ return importlib.import_module(f".{name}", __name__)
2002
+
2003
+ raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
2004
+
2005
+
2006
+ def _constrain_as_value(symbol, min: Optional[builtins.int] = None, max: Optional[builtins.int] = None):
2007
+ """
2008
+ Add min/max constraint on the intermediate symbol at tracing time. If called in eager mode,
2009
+ it will still check if the input value is within the specified range.
2010
+ """
2011
+ torch.sym_constrain_range(symbol, min=min, max=max)
2012
+
2013
+
2014
+ def _constrain_as_size(symbol, min: Optional[builtins.int] = None, max: Optional[builtins.int] = None):
2015
+ """
2016
+ This indicates that a given int is size-like, and can be used in any context where a size is expected.
2017
+ You will typically use this when reading out integers from Tensors, e.g., max.item() or lengths.tolist()
2018
+ which then need to be used as tensor constructors. Providing these assertions to PyTorch can help resolve
2019
+ GuardOnDataDependentSymNode errors upon export, since we cannot guard on unbacked SymInts.
2020
+
2021
+ This function has unusual semantics which distinguish it from
2022
+ constrain_as_value. Specifically, in some circumstances in framework
2023
+ code, we will treat this int as >= 2 (when we do a size-oblivious guard).
2024
+ This makes it easier to use the unbacked int in
2025
+ size contexts, as we will often attempt to guard on a size being zero/one
2026
+ (e.g., when computing the contiguity of a tensor, or testing if
2027
+ broadcasting can occur), which will not work on unbacked SymInts.
2028
+ However, if we conservatively assume that the size is not zero/one, we will
2029
+ end up with a graph that will still work even if the size is zero/one.
2030
+
2031
+ For more details, see https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit
2032
+
2033
+ """
2034
+ torch.sym_constrain_range_for_size(symbol, min=min, max=max)
2035
+
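A hedged sketch of the intended usage under an export-style tracing context, where ``.item()`` produces an unbacked SymInt; the module name and bound values below are illustrative only.

```python
import torch


class ZerosOfDataDependentSize(torch.nn.Module):
    def forward(self, lengths: torch.Tensor) -> torch.Tensor:
        # .item() yields an unbacked SymInt during export; declaring it
        # size-like lets it be used as a constructor size without
        # GuardOnDataDependentSymNode errors.
        n = lengths.max().item()
        torch._constrain_as_size(n, min=0, max=4096)
        return torch.zeros(n)
```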
2036
+
2037
+ from . import _logging
2038
+ _logging._init_logs()
venv/lib/python3.10/site-packages/torch/_appdirs.py ADDED
@@ -0,0 +1,666 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ # Copyright (c) 2005-2010 ActiveState Software Inc.
4
+ # Copyright (c) 2013 Eddy Petrișor
5
+
6
+ # flake8: noqa
7
+
8
+ """
9
+ This file is directly from
10
+ https://github.com/ActiveState/appdirs/blob/3fe6a83776843a46f20c2e5587afcffe05e03b39/appdirs.py
11
+
12
+ The license of https://github.com/ActiveState/appdirs copied below:
13
+
14
+
15
+ # This is the MIT license
16
+
17
+ Copyright (c) 2010 ActiveState Software Inc.
18
+
19
+ Permission is hereby granted, free of charge, to any person obtaining a
20
+ copy of this software and associated documentation files (the
21
+ "Software"), to deal in the Software without restriction, including
22
+ without limitation the rights to use, copy, modify, merge, publish,
23
+ distribute, sublicense, and/or sell copies of the Software, and to
24
+ permit persons to whom the Software is furnished to do so, subject to
25
+ the following conditions:
26
+
27
+ The above copyright notice and this permission notice shall be included
28
+ in all copies or substantial portions of the Software.
29
+
30
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
31
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
32
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
33
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
34
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
35
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
36
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
37
+ """
38
+
39
+ """Utilities for determining application-specific dirs.
40
+
41
+ See <https://github.com/ActiveState/appdirs> for details and usage.
42
+ """
43
+ # Dev Notes:
44
+ # - MSDN on where to store app data files:
45
+ # http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
46
+ # - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
47
+ # - XDG spec for Un*x: https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
48
+
49
+ __version__ = "1.4.4"
50
+ __version_info__ = tuple(int(segment) for segment in __version__.split("."))
51
+
52
+
53
+ import os
54
+ import sys
55
+
56
+ unicode = str
57
+
58
+ if sys.platform.startswith("java"):
59
+ import platform
60
+
61
+ os_name = platform.java_ver()[3][0]
62
+ if os_name.startswith("Windows"): # "Windows XP", "Windows 7", etc.
63
+ system = "win32"
64
+ elif os_name.startswith("Mac"): # "Mac OS X", etc.
65
+ system = "darwin"
66
+ else: # "Linux", "SunOS", "FreeBSD", etc.
67
+ # Setting this to "linux2" is not ideal, but only Windows or Mac
68
+ # are actually checked for and the rest of the module expects
69
+ # *sys.platform* style strings.
70
+ system = "linux2"
71
+ else:
72
+ system = sys.platform
73
+
74
+
75
+ def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
76
+ r"""Return full path to the user-specific data dir for this application.
77
+
78
+ "appname" is the name of application.
79
+ If None, just the system directory is returned.
80
+ "appauthor" (only used on Windows) is the name of the
81
+ appauthor or distributing body for this application. Typically
82
+ it is the owning company name. This falls back to appname. You may
83
+ pass False to disable it.
84
+ "version" is an optional version path element to append to the
85
+ path. You might want to use this if you want multiple versions
86
+ of your app to be able to run independently. If used, this
87
+ would typically be "<major>.<minor>".
88
+ Only applied when appname is present.
89
+ "roaming" (boolean, default False) can be set True to use the Windows
90
+ roaming appdata directory. That means that for users on a Windows
91
+ network setup for roaming profiles, this user data will be
92
+ sync'd on login. See
93
+ <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
94
+ for a discussion of issues.
95
+
96
+ Typical user data directories are:
97
+ Mac OS X: ~/Library/Application Support/<AppName>
98
+ Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
99
+ Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
100
+ Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
101
+ Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
102
+ Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
103
+
104
+ For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
105
+ That means, by default "~/.local/share/<AppName>".
106
+ """
107
+ if system == "win32":
108
+ if appauthor is None:
109
+ appauthor = appname
110
+ const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
111
+ path = os.path.normpath(_get_win_folder(const))
112
+ if appname:
113
+ if appauthor is not False:
114
+ path = os.path.join(path, appauthor, appname)
115
+ else:
116
+ path = os.path.join(path, appname)
117
+ elif system == "darwin":
118
+ path = os.path.expanduser("~/Library/Application Support/")
119
+ if appname:
120
+ path = os.path.join(path, appname)
121
+ else:
122
+ path = os.getenv("XDG_DATA_HOME", os.path.expanduser("~/.local/share"))
123
+ if appname:
124
+ path = os.path.join(path, appname)
125
+ if appname and version:
126
+ path = os.path.join(path, version)
127
+ return path
128
+
129
+
130
+ def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
131
+ r"""Return full path to the user-shared data dir for this application.
132
+
133
+ "appname" is the name of application.
134
+ If None, just the system directory is returned.
135
+ "appauthor" (only used on Windows) is the name of the
136
+ appauthor or distributing body for this application. Typically
137
+ it is the owning company name. This falls back to appname. You may
138
+ pass False to disable it.
139
+ "version" is an optional version path element to append to the
140
+ path. You might want to use this if you want multiple versions
141
+ of your app to be able to run independently. If used, this
142
+ would typically be "<major>.<minor>".
143
+ Only applied when appname is present.
144
+ "multipath" is an optional parameter only applicable to *nix
145
+ which indicates that the entire list of data dirs should be
146
+ returned. By default, the first item from XDG_DATA_DIRS is
147
+ returned, or '/usr/local/share/<AppName>',
148
+ if XDG_DATA_DIRS is not set
149
+
150
+ Typical site data directories are:
151
+ Mac OS X: /Library/Application Support/<AppName>
152
+ Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
153
+ Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
154
+ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
155
+ Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
156
+
157
+ For Unix, this is using the $XDG_DATA_DIRS[0] default.
158
+
159
+ WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
160
+ """
161
+ if system == "win32":
162
+ if appauthor is None:
163
+ appauthor = appname
164
+ path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
165
+ if appname:
166
+ if appauthor is not False:
167
+ path = os.path.join(path, appauthor, appname)
168
+ else:
169
+ path = os.path.join(path, appname)
170
+ elif system == "darwin":
171
+ path = os.path.expanduser("/Library/Application Support")
172
+ if appname:
173
+ path = os.path.join(path, appname)
174
+ else:
175
+ # XDG default for $XDG_DATA_DIRS
176
+ # only first, if multipath is False
177
+ path = os.getenv(
178
+ "XDG_DATA_DIRS", os.pathsep.join(["/usr/local/share", "/usr/share"])
179
+ )
180
+ pathlist = [
181
+ os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)
182
+ ]
183
+ if appname:
184
+ if version:
185
+ appname = os.path.join(appname, version)
186
+ pathlist = [os.sep.join([x, appname]) for x in pathlist]
187
+
188
+ if multipath:
189
+ path = os.pathsep.join(pathlist)
190
+ else:
191
+ path = pathlist[0]
192
+ return path
193
+
194
+ if appname and version:
195
+ path = os.path.join(path, version)
196
+ return path
197
+
198
+
199
+ def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
200
+ r"""Return full path to the user-specific config dir for this application.
201
+
202
+ "appname" is the name of application.
203
+ If None, just the system directory is returned.
204
+ "appauthor" (only used on Windows) is the name of the
205
+ appauthor or distributing body for this application. Typically
206
+ it is the owning company name. This falls back to appname. You may
207
+ pass False to disable it.
208
+ "version" is an optional version path element to append to the
209
+ path. You might want to use this if you want multiple versions
210
+ of your app to be able to run independently. If used, this
211
+ would typically be "<major>.<minor>".
212
+ Only applied when appname is present.
213
+ "roaming" (boolean, default False) can be set True to use the Windows
214
+ roaming appdata directory. That means that for users on a Windows
215
+ network setup for roaming profiles, this user data will be
216
+ sync'd on login. See
217
+ <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
218
+ for a discussion of issues.
219
+
220
+ Typical user config directories are:
221
+ Mac OS X: ~/Library/Preferences/<AppName>
222
+ Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
223
+ Win *: same as user_data_dir
224
+
225
+ For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
226
+ That means, by default "~/.config/<AppName>".
227
+ """
228
+ if system == "win32":
229
+ path = user_data_dir(appname, appauthor, None, roaming)
230
+ elif system == "darwin":
231
+ path = os.path.expanduser("~/Library/Preferences/")
232
+ if appname:
233
+ path = os.path.join(path, appname)
234
+ else:
235
+ path = os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config"))
236
+ if appname:
237
+ path = os.path.join(path, appname)
238
+ if appname and version:
239
+ path = os.path.join(path, version)
240
+ return path
241
+
242
+
243
+ def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
244
+ r"""Return full path to the user-shared data dir for this application.
245
+
246
+ "appname" is the name of application.
247
+ If None, just the system directory is returned.
248
+ "appauthor" (only used on Windows) is the name of the
249
+ appauthor or distributing body for this application. Typically
250
+ it is the owning company name. This falls back to appname. You may
251
+ pass False to disable it.
252
+ "version" is an optional version path element to append to the
253
+ path. You might want to use this if you want multiple versions
254
+ of your app to be able to run independently. If used, this
255
+ would typically be "<major>.<minor>".
256
+ Only applied when appname is present.
257
+ "multipath" is an optional parameter only applicable to *nix
258
+ which indicates that the entire list of config dirs should be
259
+ returned. By default, the first item from XDG_CONFIG_DIRS is
260
+ returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
261
+
262
+ Typical site config directories are:
263
+ Mac OS X: same as site_data_dir
264
+ Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
265
+ $XDG_CONFIG_DIRS
266
+ Win *: same as site_data_dir
267
+ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
268
+
269
+ For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
270
+
271
+ WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
272
+ """
273
+ if system == "win32":
274
+ path = site_data_dir(appname, appauthor)
275
+ if appname and version:
276
+ path = os.path.join(path, version)
277
+ elif system == "darwin":
278
+ path = os.path.expanduser("/Library/Preferences")
279
+ if appname:
280
+ path = os.path.join(path, appname)
281
+ else:
282
+ # XDG default for $XDG_CONFIG_DIRS
283
+ # only first, if multipath is False
284
+ path = os.getenv("XDG_CONFIG_DIRS", "/etc/xdg")
285
+ pathlist = [
286
+ os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)
287
+ ]
288
+ if appname:
289
+ if version:
290
+ appname = os.path.join(appname, version)
291
+ pathlist = [os.sep.join([x, appname]) for x in pathlist]
292
+
293
+ if multipath:
294
+ path = os.pathsep.join(pathlist)
295
+ else:
296
+ path = pathlist[0]
297
+ return path
298
+
299
+
300
+ def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
301
+ r"""Return full path to the user-specific cache dir for this application.
302
+
303
+ "appname" is the name of application.
304
+ If None, just the system directory is returned.
305
+ "appauthor" (only used on Windows) is the name of the
306
+ appauthor or distributing body for this application. Typically
307
+ it is the owning company name. This falls back to appname. You may
308
+ pass False to disable it.
309
+ "version" is an optional version path element to append to the
310
+ path. You might want to use this if you want multiple versions
311
+ of your app to be able to run independently. If used, this
312
+ would typically be "<major>.<minor>".
313
+ Only applied when appname is present.
314
+ "opinion" (boolean) can be False to disable the appending of
315
+ "Cache" to the base app data dir for Windows. See
316
+ discussion below.
317
+
318
+ Typical user cache directories are:
319
+ Mac OS X: ~/Library/Caches/<AppName>
320
+ Unix: ~/.cache/<AppName> (XDG default)
321
+ Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
322
+ Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
323
+
324
+ On Windows the only suggestion in the MSDN docs is that local settings go in
325
+ the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
326
+ app data dir (the default returned by `user_data_dir` above). Apps typically
327
+ put cache data somewhere *under* the given dir here. Some examples:
328
+ ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
329
+ ...\Acme\SuperApp\Cache\1.0
330
+ OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
331
+ This can be disabled with the `opinion=False` option.
332
+ """
333
+ if system == "win32":
334
+ if appauthor is None:
335
+ appauthor = appname
336
+ path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
337
+ if appname:
338
+ if appauthor is not False:
339
+ path = os.path.join(path, appauthor, appname)
340
+ else:
341
+ path = os.path.join(path, appname)
342
+ if opinion:
343
+ path = os.path.join(path, "Cache")
344
+ elif system == "darwin":
345
+ path = os.path.expanduser("~/Library/Caches")
346
+ if appname:
347
+ path = os.path.join(path, appname)
348
+ else:
349
+ path = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
350
+ if appname:
351
+ path = os.path.join(path, appname)
352
+ if appname and version:
353
+ path = os.path.join(path, version)
354
+ return path
355
+
356
+
357
+ def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
358
+ r"""Return full path to the user-specific state dir for this application.
359
+
360
+ "appname" is the name of application.
361
+ If None, just the system directory is returned.
362
+ "appauthor" (only used on Windows) is the name of the
363
+ appauthor or distributing body for this application. Typically
364
+ it is the owning company name. This falls back to appname. You may
365
+ pass False to disable it.
366
+ "version" is an optional version path element to append to the
367
+ path. You might want to use this if you want multiple versions
368
+ of your app to be able to run independently. If used, this
369
+ would typically be "<major>.<minor>".
370
+ Only applied when appname is present.
371
+ "roaming" (boolean, default False) can be set True to use the Windows
372
+ roaming appdata directory. That means that for users on a Windows
373
+ network setup for roaming profiles, this user data will be
374
+ sync'd on login. See
375
+ <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
376
+ for a discussion of issues.
377
+
378
+ Typical user state directories are:
379
+ Mac OS X: same as user_data_dir
380
+ Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined
381
+ Win *: same as user_data_dir
382
+
383
+ For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
384
+ to extend the XDG spec and support $XDG_STATE_HOME.
385
+
386
+ That means, by default "~/.local/state/<AppName>".
387
+ """
388
+ if system in ["win32", "darwin"]:
389
+ path = user_data_dir(appname, appauthor, None, roaming)
390
+ else:
391
+ path = os.getenv("XDG_STATE_HOME", os.path.expanduser("~/.local/state"))
392
+ if appname:
393
+ path = os.path.join(path, appname)
394
+ if appname and version:
395
+ path = os.path.join(path, version)
396
+ return path
397
+
398
+
399
+ def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
400
+ r"""Return full path to the user-specific log dir for this application.
401
+
402
+ "appname" is the name of application.
403
+ If None, just the system directory is returned.
404
+ "appauthor" (only used on Windows) is the name of the
405
+ appauthor or distributing body for this application. Typically
406
+ it is the owning company name. This falls back to appname. You may
407
+ pass False to disable it.
408
+ "version" is an optional version path element to append to the
409
+ path. You might want to use this if you want multiple versions
410
+ of your app to be able to run independently. If used, this
411
+ would typically be "<major>.<minor>".
412
+ Only applied when appname is present.
413
+ "opinion" (boolean) can be False to disable the appending of
414
+ "Logs" to the base app data dir for Windows, and "log" to the
415
+ base cache dir for Unix. See discussion below.
416
+
417
+ Typical user log directories are:
418
+ Mac OS X: ~/Library/Logs/<AppName>
419
+ Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
420
+ Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
421
+ Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
422
+
423
+ On Windows the only suggestion in the MSDN docs is that local settings
424
+ go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
425
+ examples of what some windows apps use for a logs dir.)
426
+
427
+ OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
428
+ value for Windows and appends "log" to the user cache dir for Unix.
429
+ This can be disabled with the `opinion=False` option.
430
+ """
431
+ if system == "darwin":
432
+ path = os.path.join(os.path.expanduser("~/Library/Logs"), appname)
433
+ elif system == "win32":
434
+ path = user_data_dir(appname, appauthor, version)
435
+ version = False
436
+ if opinion:
437
+ path = os.path.join(path, "Logs")
438
+ else:
439
+ path = user_cache_dir(appname, appauthor, version)
440
+ version = False
441
+ if opinion:
442
+ path = os.path.join(path, "log")
443
+ if appname and version:
444
+ path = os.path.join(path, version)
445
+ return path
446
+
447
+
448
+ class AppDirs(object):
449
+ """Convenience wrapper for getting application dirs."""
450
+
451
+ def __init__(
452
+ self, appname=None, appauthor=None, version=None, roaming=False, multipath=False
453
+ ):
454
+ self.appname = appname
455
+ self.appauthor = appauthor
456
+ self.version = version
457
+ self.roaming = roaming
458
+ self.multipath = multipath
459
+
460
+ @property
461
+ def user_data_dir(self):
462
+ return user_data_dir(
463
+ self.appname, self.appauthor, version=self.version, roaming=self.roaming
464
+ )
465
+
466
+ @property
467
+ def site_data_dir(self):
468
+ return site_data_dir(
469
+ self.appname, self.appauthor, version=self.version, multipath=self.multipath
470
+ )
471
+
472
+ @property
473
+ def user_config_dir(self):
474
+ return user_config_dir(
475
+ self.appname, self.appauthor, version=self.version, roaming=self.roaming
476
+ )
477
+
478
+ @property
479
+ def site_config_dir(self):
480
+ return site_config_dir(
481
+ self.appname, self.appauthor, version=self.version, multipath=self.multipath
482
+ )
483
+
484
+ @property
485
+ def user_cache_dir(self):
486
+ return user_cache_dir(self.appname, self.appauthor, version=self.version)
487
+
488
+ @property
489
+ def user_state_dir(self):
490
+ return user_state_dir(self.appname, self.appauthor, version=self.version)
491
+
492
+ @property
493
+ def user_log_dir(self):
494
+ return user_log_dir(self.appname, self.appauthor, version=self.version)
495
+
496
+
497
+ # ---- internal support stuff
498
+
499
+
500
+ def _get_win_folder_from_registry(csidl_name):
501
+ """This is a fallback technique at best. I'm not sure if using the
502
+ registry for this guarantees us the correct answer for all CSIDL_*
503
+ names.
504
+ """
505
+ import winreg as _winreg
506
+
507
+ shell_folder_name = {
508
+ "CSIDL_APPDATA": "AppData",
509
+ "CSIDL_COMMON_APPDATA": "Common AppData",
510
+ "CSIDL_LOCAL_APPDATA": "Local AppData",
511
+ }[csidl_name]
512
+
513
+ key = _winreg.OpenKey(
514
+ _winreg.HKEY_CURRENT_USER,
515
+ r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders",
516
+ )
517
+ dir, type = _winreg.QueryValueEx(key, shell_folder_name)
518
+ return dir
519
+
520
+
521
+ def _get_win_folder_with_pywin32(csidl_name):
522
+ from win32com.shell import shell, shellcon
523
+
524
+ dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
525
+ # Try to make this a unicode path because SHGetFolderPath does
526
+ # not return unicode strings when there is unicode data in the
527
+ # path.
528
+ try:
529
+ dir = unicode(dir)
530
+
531
+ # Downgrade to short path name if have highbit chars. See
532
+ # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
533
+ has_high_char = False
534
+ for c in dir:
535
+ if ord(c) > 255:
536
+ has_high_char = True
537
+ break
538
+ if has_high_char:
539
+ try:
540
+ import win32api
541
+
542
+ dir = win32api.GetShortPathName(dir)
543
+ except ImportError:
544
+ pass
545
+ except UnicodeError:
546
+ pass
547
+ return dir
548
+
549
+
550
+ def _get_win_folder_with_ctypes(csidl_name):
551
+ import ctypes
552
+
553
+ csidl_const = {
554
+ "CSIDL_APPDATA": 26,
555
+ "CSIDL_COMMON_APPDATA": 35,
556
+ "CSIDL_LOCAL_APPDATA": 28,
557
+ }[csidl_name]
558
+
559
+ buf = ctypes.create_unicode_buffer(1024)
560
+ ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
561
+
562
+ # Downgrade to short path name if have highbit chars. See
563
+ # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
564
+ has_high_char = False
565
+ for c in buf:
566
+ if ord(c) > 255:
567
+ has_high_char = True
568
+ break
569
+ if has_high_char:
570
+ buf2 = ctypes.create_unicode_buffer(1024)
571
+ if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
572
+ buf = buf2
573
+
574
+ return buf.value
575
+
576
+
577
+ def _get_win_folder_with_jna(csidl_name):
578
+ import array
579
+
580
+ from com.sun import jna
581
+ from com.sun.jna.platform import win32
582
+
583
+ buf_size = win32.WinDef.MAX_PATH * 2
584
+ buf = array.zeros("c", buf_size)
585
+ shell = win32.Shell32.INSTANCE
586
+ shell.SHGetFolderPath(
587
+ None,
588
+ getattr(win32.ShlObj, csidl_name),
589
+ None,
590
+ win32.ShlObj.SHGFP_TYPE_CURRENT,
591
+ buf,
592
+ )
593
+ dir = jna.Native.toString(buf.tostring()).rstrip("\0")
594
+
595
+ # Downgrade to short path name if have highbit chars. See
596
+ # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
597
+ has_high_char = False
598
+ for c in dir:
599
+ if ord(c) > 255:
600
+ has_high_char = True
601
+ break
602
+ if has_high_char:
603
+ buf = array.zeros("c", buf_size)
604
+ kernel = win32.Kernel32.INSTANCE
605
+ if kernel.GetShortPathName(dir, buf, buf_size):
606
+ dir = jna.Native.toString(buf.tostring()).rstrip("\0")
607
+
608
+ return dir
609
+
610
+
611
+ if system == "win32":
612
+ try:
613
+ import win32com.shell
614
+
615
+ _get_win_folder = _get_win_folder_with_pywin32
616
+ except ImportError:
617
+ try:
618
+ from ctypes import windll
619
+
620
+ _get_win_folder = _get_win_folder_with_ctypes
621
+ except ImportError:
622
+ try:
623
+ import com.sun.jna
624
+
625
+ _get_win_folder = _get_win_folder_with_jna
626
+ except ImportError:
627
+ _get_win_folder = _get_win_folder_from_registry
628
+
629
+
630
+ # ---- self test code
631
+
632
+ if __name__ == "__main__":
633
+ appname = "MyApp"
634
+ appauthor = "MyCompany"
635
+
636
+ props = (
637
+ "user_data_dir",
638
+ "user_config_dir",
639
+ "user_cache_dir",
640
+ "user_state_dir",
641
+ "user_log_dir",
642
+ "site_data_dir",
643
+ "site_config_dir",
644
+ )
645
+
646
+ print(f"-- app dirs {__version__} --")
647
+
648
+ print("-- app dirs (with optional 'version')")
649
+ dirs = AppDirs(appname, appauthor, version="1.0")
650
+ for prop in props:
651
+ print(f"{prop}: {getattr(dirs, prop)}")
652
+
653
+ print("\n-- app dirs (without optional 'version')")
654
+ dirs = AppDirs(appname, appauthor)
655
+ for prop in props:
656
+ print(f"{prop}: {getattr(dirs, prop)}")
657
+
658
+ print("\n-- app dirs (without optional 'appauthor')")
659
+ dirs = AppDirs(appname)
660
+ for prop in props:
661
+ print(f"{prop}: {getattr(dirs, prop)}")
662
+
663
+ print("\n-- app dirs (with disabled 'appauthor')")
664
+ dirs = AppDirs(appname, appauthor=False)
665
+ for prop in props:
666
+ print(f"{prop}: {getattr(dirs, prop)}")
venv/lib/python3.10/site-packages/torch/_classes.py ADDED
@@ -0,0 +1,55 @@
1
+ import types
2
+
3
+ import torch._C
4
+
5
+
6
+ class _ClassNamespace(types.ModuleType):
7
+ def __init__(self, name):
8
+ super().__init__("torch.classes" + name)
9
+ self.name = name
10
+
11
+ def __getattr__(self, attr):
12
+ proxy = torch._C._get_custom_class_python_wrapper(self.name, attr)
13
+ if proxy is None:
14
+ raise RuntimeError(f"Class {self.name}.{attr} not registered!")
15
+ return proxy
16
+
17
+
18
+ class _Classes(types.ModuleType):
19
+ __file__ = "_classes.py"
20
+
21
+ def __init__(self):
22
+ super().__init__("torch.classes")
23
+
24
+ def __getattr__(self, name):
25
+ namespace = _ClassNamespace(name)
26
+ setattr(self, name, namespace)
27
+ return namespace
28
+
29
+ @property
30
+ def loaded_libraries(self):
31
+ return torch.ops.loaded_libraries
32
+
33
+ def load_library(self, path):
34
+ """
35
+ Loads a shared library from the given path into the current process.
36
+
37
+ The library being loaded may run global initialization code to register
38
+ custom classes with the PyTorch JIT runtime. This allows dynamically
39
+ loading custom classes. For this, you should compile your class
40
+ and the static registration code into a shared library object, and then
41
+ call ``torch.classes.load_library('path/to/libcustom.so')`` to load the
42
+ shared object.
43
+
44
+ After the library is loaded, it is added to the
45
+ ``torch.classes.loaded_libraries`` attribute, a set that may be inspected
46
+ for the paths of all libraries loaded using this function.
47
+
48
+ Args:
49
+ path (str): A path to a shared library to load.
50
+ """
51
+ torch.ops.load_library(path)
52
+
53
+
54
+ # The classes "namespace"
55
+ classes = _Classes()
venv/lib/python3.10/site-packages/torch/_compile.py ADDED
@@ -0,0 +1,30 @@
1
+ """
2
+ APIs related to torch.compile which lazily import torch._dynamo to avoid
3
+ circular dependencies.
4
+ """
5
+ import functools
6
+
7
+
8
+ def _disable_dynamo(fn=None, recursive=True):
9
+ """
10
+ This API should be only used inside torch, external users should still use
11
+ torch._dynamo.disable. The main goal of this API is to avoid circular
12
+ import issues that are common when using _dynamo.disable inside torch
13
+ itself.
14
+
15
+ This API avoids that by deferring the import of torch._dynamo from import time to
16
+ the invocation of the decorated function.
17
+ """
18
+ if fn is not None:
19
+
20
+ @functools.wraps(fn)
21
+ def inner(*args, **kwargs):
22
+ import torch._dynamo
23
+
24
+ return torch._dynamo.disable(fn, recursive)(*args, **kwargs)
25
+
26
+ return inner
27
+ else:
28
+ # decorator usage like @_disable_dynamo(recursive=False). The resulting
29
+ # object expects the original decorated function as the arg.
30
+ return functools.partial(_disable_dynamo, recursive=recursive)
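A small sketch of both usage forms handled above, assuming internal-only callers as the docstring requires; the helper names are hypothetical.

```python
from torch._compile import _disable_dynamo


# Bare decorator form: the wrapped frame is skipped by dynamo.
@_disable_dynamo
def _log_shape(t):
    print("shape:", tuple(t.shape))
    return t


# Parameterized form: only this frame is skipped; callees may still be traced.
@_disable_dynamo(recursive=False)
def _double(t):
    return t * 2
```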
venv/lib/python3.10/site-packages/torch/_custom_ops.py ADDED
@@ -0,0 +1,322 @@
1
+ import inspect
2
+
3
+ from torch._custom_op.impl import (
4
+ _custom_op_with_schema,
5
+ _find_custom_op,
6
+ infer_schema,
7
+ parse_qualname,
8
+ validate_namespace,
9
+ )
10
+ from torch.library import get_ctx
11
+
12
+ __all__ = [
13
+ "custom_op",
14
+ "impl",
15
+ "impl_abstract",
16
+ "get_ctx",
17
+ "impl_save_for_backward",
18
+ "impl_backward",
19
+ ]
20
+
21
+
22
+ def custom_op(qualname, func_or_schema=None):
23
+ r"""Register a new custom operator
24
+
25
+ In PyTorch, defining an op (short for "operator") is a two step-process:
26
+ - we need to define the op (by providing an operator name and schema)
27
+ - we need to implement behavior for how the operator interacts with
28
+ various PyTorch subsystems, like CPU/CUDA Tensors, Autograd, etc.
29
+
30
+ This entrypoint defines the custom operator (the first step)
31
+ you must then perform the second step by calling various
32
+ ``impl_*`` APIs.
33
+
34
+ This API may be used as a decorator (see examples).
35
+
36
+ For a detailed guide on custom ops, please see
37
+ https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
38
+
39
+ Arguments:
40
+ qualname (str): Should be a string that looks like
41
+ "namespace::operator_name". Operators in PyTorch need a namespace to
42
+ avoid name collisions; a given operator may only be created once.
43
+ If you are writing a Python library, we recommend the namespace to
44
+ be the name of your top-level module.
45
+ func_or_schema (Union[Callable, str]): Each PyTorch operator needs a
46
+ schema that tells PyTorch the types of the inputs/outputs.
47
+ If this is a Callable, we will automatically infer the schema from
48
+ the type annotations on the function (see examples). Otherwise,
49
+ if you don't want to use type annotations, you may provide us the
50
+ schema string.
51
+
52
+ Example::
53
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
54
+ >>> import torch
55
+ >>> import numpy as np
56
+ >>> from torch import Tensor
57
+ >>>
58
+ >>> # Step 1: define the custom op.
59
+ >>> # We need to provide the API a "prototype function"
60
+ >>> # (a function that returns NotImplementedError), from which
61
+ >>> # we will infer the types of the inputs and outputs.
62
+ >>> @torch._custom_ops.custom_op("mylibrary::numpy_sin")
63
+ >>> def numpy_sin(x: Tensor) -> Tensor:
64
+ >>> raise NotImplementedError()
65
+ >>>
66
+ >>> # The custom op is now accessible via the torch.ops module:
67
+ >>> torch.ops.mylibrary.numpy_sin
68
+ >>>
69
+ >>> # Step 2: Register an implementation for various PyTorch subsystems
70
+ >>>
71
+ >>> # Register an implementation for CPU tensors
72
+ >>> @torch._custom_ops.impl("mylibrary::numpy_sin", device_types="cpu")
73
+ >>> def numpy_sin_impl_cpu(x):
74
+ >>> return torch.from_numpy(np.sin(x.numpy()))
75
+ >>>
76
+ >>> # Register an implementation for CUDA tensors
77
+ >>> @torch._custom_ops.impl("mylibrary::numpy_sin", device_types="cuda")
78
+ >>> def numpy_sin_impl_cuda(x):
79
+ >>> return torch.from_numpy(np.sin(x.cpu().numpy())).to(x.device)
80
+ >>>
81
+ >>> x = torch.randn(3)
82
+ >>> torch.ops.mylibrary.numpy_sin(x) # calls numpy_sin_impl_cpu
83
+ >>>
84
+ >>> x_cuda = x.cuda()
85
+ >>> torch.ops.mylibrary.numpy_sin(x) # calls numpy_sin_impl_cuda
86
+
87
+ """
88
+ ns, name = parse_qualname(qualname)
89
+ validate_namespace(ns)
90
+
91
+ def inner(func):
92
+ if not inspect.isfunction(func):
93
+ raise ValueError(
94
+ f"custom_op(...)(func): Expected `func` to be a Python "
95
+ f"function, got: {type(func)}"
96
+ )
97
+
98
+ if func.__name__ != name:
99
+ raise ValueError(
100
+ f"custom_op(qualname='{qualname}', ...)(func): expected `func` "
101
+ f"to have name '{name}' but got '{func.__name__}'. "
102
+ f"Please either change the name of `func` or the qualname that "
103
+ f"is passed to `custom_op`"
104
+ )
105
+
106
+ schema = infer_schema(func)
107
+ _custom_op_with_schema(qualname, schema)
108
+ return func
109
+
110
+ if func_or_schema is None:
111
+ return inner
112
+ if isinstance(func_or_schema, str):
113
+ _custom_op_with_schema(qualname, func_or_schema)
114
+ else:
115
+ return inner(func_or_schema)
116
+
117
+
118
+ def impl(qualname, *, device_types=("cpu", "cuda"), func=None):
119
+ r"""Register an implementation for a device type for this custom op.
120
+
121
+ If the op is passed multiple Tensor inputs with different device
122
+ types, it will dispatch to the registered implementation for the highest
123
+ priority device type among those present.
124
+ The supported device types, in order of priority, are {'cuda', 'cpu'}.
125
+
126
+ This API may be used as a decorator (see examples).
127
+
128
+ For a detailed guide on custom ops, please see
129
+ https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
130
+
131
+ Arguments:
132
+ device_types (str or Iterable[str]): the device type(s) to register the function for.
133
+
134
+ Example::
135
+ >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
136
+ >>> import torch
137
+ >>> import numpy as np
138
+ >>> from torch import Tensor
139
+ >>>
140
+ >>> # Step 1: define the custom op.
141
+ >>> # We need to provide the API a "prototype function"
142
+ >>> # (a function that returns NotImplementedError), from which
143
+ >>> # we will infer the types of the inputs and outputs.
144
+ >>> @torch._custom_ops.custom_op("mylibrary::numpy_cos")
145
+ >>> def numpy_cos(x: Tensor) -> Tensor:
146
+ >>> raise NotImplementedError()
147
+ >>>
148
+ >>> # The custom op is now accessible via the torch.ops module:
149
+ >>> torch.ops.mylibrary.numpy_cos
150
+ >>>
151
+ >>> # Step 2: Register an implementation for various PyTorch subsystems
152
+ >>>
153
+ >>> # Register an implementation for CPU tensors
154
+ >>> @torch._custom_ops.impl("mylibrary::numpy_cos", device_types="cpu")
155
+ >>> def numpy_cos_impl_cpu(x):
156
+ >>> return torch.from_numpy(np.cos(x.numpy()))
157
+ >>>
158
+ >>> # Register an implementation for CUDA tensors
159
+ >>> @torch._custom_ops.impl("mylibrary::numpy_cos", device_types="cuda")
160
+ >>> def numpy_cos_impl_cuda(x):
161
+ >>> return torch.from_numpy(np.cos(x.cpu().numpy())).to(x.device)
162
+ >>>
163
+ >>> x = torch.randn(3)
164
+ >>> torch.ops.mylibrary.numpy_cos(x) # calls numpy_cos_impl_cpu
165
+ >>>
166
+ >>> x_cuda = x.cuda()
167
+ >>> torch.ops.mylibrary.numpy_cos(x) # calls numpy_cos_impl_cuda
168
+
169
+ """
170
+
171
+ def inner(func):
172
+ custom_op = _find_custom_op(qualname, also_check_torch_library=True)
173
+ custom_op.impl(device_types, _stacklevel=3)(func)
174
+ return func
175
+
176
+ if func is None:
177
+ return inner
178
+ return inner(func)
179
+
180
+
181
+ def impl_abstract(qualname, *, func=None):
182
+ r"""Register an abstract implementation for this operator.
183
+
184
+ An "abstract implementation" specifies the behavior of this operator on
185
+ Tensors that carry no data. Given some input Tensors with certain properties
186
+ (sizes/strides/storage_offset/device), it specifies what the properties of
187
+ the output Tensors are.
188
+
189
+ The abstract implementation has the same signature as the operator.
190
+ It is run for both FakeTensors and meta tensors. To write an abstract
191
+ implementation, assume that all Tensor inputs to the operator are
192
+ regular CPU/CUDA/Meta tensors, but they do not have storage, and
193
+ you are trying to return regular CPU/CUDA/Meta tensor(s) as output.
194
+ The abstract implementation must consist of only PyTorch operations
195
+ (and may not directly access the storage or data of any input or
196
+ intermediate Tensors).
197
+
198
+ This API may be used as a decorator (see examples).
199
+
200
+ For a detailed guide on custom ops, please see
201
+ https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
202
+
203
+ Examples::
204
+ >>> import numpy as np
205
+ >>> from torch import Tensor
206
+ >>>
207
+ >>> # Example 1: an operator without data-dependent output shape
208
+ >>> @torch._custom_ops.custom_op("mylibrary::custom_linear")
209
+ >>> def custom_linear(x: Tensor, weight: Tensor, bias: Tensor) -> Tensor:
210
+ >>> raise NotImplementedError()
211
+ >>>
212
+ >>> @torch._custom_ops.impl_abstract("mylibrary::custom_linear")
213
+ >>> def custom_linear_abstract(x, weight):
214
+ >>> assert x.dim() == 2
215
+ >>> assert weight.dim() == 2
216
+ >>> assert bias.dim() == 1
217
+ >>> assert x.shape[1] == weight.shape[1]
218
+ >>> assert weight.shape[0] == bias.shape[0]
219
+ >>> assert x.device == weight.device
220
+ >>>
221
+ >>> return (x @ weight.t()) + bias
222
+ >>>
223
+ >>> # Example 2: an operator with data-dependent output shape
224
+ >>> @torch._custom_ops.custom_op('mylibrary::custom_nonzero')
225
+ >>> def custom_nonzero(x: Tensor) -> Tensor:
226
+ >>> ...
227
+ >>>
228
+ >>> @torch._custom_ops.impl_abstract("mylibrary::custom_nonzero")
229
+ >>> def custom_nonzero_abstract(x):
230
+ >>> # Number of nonzero-elements is data-dependent.
231
+ >>> # Since we cannot peek at the data in an abstract impl,
232
+ >>> # we use the ctx object to construct a new symint that
233
+ >>> # represents the data-dependent size.
234
+ >>> ctx = torch._custom_ops.get_ctx()
235
+ >>> nnz = ctx.create_unbacked_symint()
236
+ >>> shape = [x.dim(), nnz]
237
+ >>> result = x.new_empty(shape, dtype=torch.long)
238
+ >>> return result
239
+ >>>
240
+ >>> @torch._custom_ops.impl("mylibrary::custom_nonzero")
241
+ >>> def custom_nonzero_impl(x):
242
+ >>> x_np = to_numpy(x)
243
+ >>> res = np.stack(np.nonzero(x_np), axis=1)
244
+ >>> # unbacked symbolic ints in PyTorch must be >= 2, so we
245
+ >>> # constrain the range to at least 2
246
+ >>> if res.shape[0] <= 1:
247
+ >>> raise RuntimeError("not supported")
248
+ >>> return torch.tensor(res, device=x.device)
249
+
250
+ """
251
+ import torch.library
252
+
253
+ return torch.library.impl_abstract(qualname, func, _stacklevel=2)
254
+
255
+
256
+ def impl_save_for_backward(qualname, *, func=None):
257
+ r"""Register a function that tells us what to save for backward.
258
+
259
+ Please see :func:`impl_backward` for more details.
260
+ """
261
+
262
+ def inner(func):
263
+ custom_op = _find_custom_op(qualname, also_check_torch_library=True)
264
+ custom_op.impl_save_for_backward(_stacklevel=3)(func)
265
+ return func
266
+
267
+ if func is None:
268
+ return inner
269
+ return inner(func)
270
+
271
+
272
+ def impl_backward(qualname, output_differentiability=None, *, func=None):
273
+ r"""Registers a backward formula for an operator.
274
+
275
+ In order for an operator to work with autograd, you need to register
276
+ a backward formula. There are two pieces to this:
277
+ 1. You must give us a function to specify what to save for backward.
278
+ Call this the "save for backward" function.
279
+ 2. You must give us a function that computes gradients. Call this the
280
+ "backward" function.
281
+
282
+ Use `impl_save_for_backward` to define a "save for backward" function
283
+ that specifies what gets saved for backward. The function should accept
284
+ two arguments ``(inputs, output)`` and return the quantities to be saved
285
+ for backward.
286
+
287
+ During runtime, when you call the operator in a forwards pass, PyTorch
288
+ will invoke the "save for backward" function with the inputs and output
289
+ of the operator.
290
+
291
+ Use `impl_backward` to define the "backward" function. The backward
292
+ function must accept ``(ctx, saved, *grads)``:
293
+ - ``ctx`` is a context object where we may provide information
294
+ - ``saved`` is exactly what gets returned from the "save for backward"
295
+ function
296
+ - ``grads`` is one or more gradients. The number of gradients matches
297
+ the number of outputs of the operator.
298
+
299
+ The backward function must return a dict that maps the name of
300
+ an input to the operator to its corresponding gradient. All inputs that
301
+ were declared to be Tensors in the operator definition must be accounted
302
+ for in the dict. The gradient may be a Tensor or None.
303
+
304
+ For a detailed guide on custom ops, please see
305
+ https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
306
+
307
+ """
308
+
309
+ def inner(func):
310
+ custom_op = _find_custom_op(qualname, also_check_torch_library=True)
311
+ custom_op.impl_backward(output_differentiability, _stacklevel=3)(func)
312
+ return func
313
+
314
+ if func is None:
315
+ return inner
316
+ return inner(func)
317
+
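A hedged end-to-end sketch of the two registrations described above, reusing the style of the ``numpy_sin`` example from ``custom_op``; the attribute-style access on ``inputs`` (e.g. ``inputs.x``) and the dict-of-gradients return value follow the contract stated in this docstring and are assumptions, not verbatim from this file.

```python
import numpy as np

import torch
from torch import Tensor


@torch._custom_ops.custom_op("mylibrary::numpy_sin_bw")
def numpy_sin_bw(x: Tensor) -> Tensor:
    raise NotImplementedError()


@torch._custom_ops.impl("mylibrary::numpy_sin_bw", device_types="cpu")
def numpy_sin_bw_impl(x):
    return torch.from_numpy(np.sin(x.numpy()))


# 1) "save for backward": receives (inputs, output), returns what to stash.
@torch._custom_ops.impl_save_for_backward("mylibrary::numpy_sin_bw")
def numpy_sin_bw_save(inputs, output):
    return inputs.x


# 2) "backward": receives (ctx, saved, *grads), returns a dict keyed by
#    the operator's input names.
@torch._custom_ops.impl_backward("mylibrary::numpy_sin_bw")
def numpy_sin_bw_backward(ctx, saved, grad_out):
    return {"x": grad_out * saved.cos()}
```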
318
+
319
+ def _destroy(qualname):
320
+ """De-registers a custom op. For testing purposes only"""
321
+ custom_op = _find_custom_op(qualname)
322
+ custom_op._destroy()
venv/lib/python3.10/site-packages/torch/_deploy.py ADDED
@@ -0,0 +1,105 @@
1
+ import io
2
+
3
+ import torch
4
+ from torch.package import Importer, OrderedImporter, PackageImporter, sys_importer
5
+ from torch.package._package_pickler import create_pickler
6
+ from torch.package._package_unpickler import PackageUnpickler
7
+ from torch.serialization import _maybe_decode_ascii
8
+
9
+
10
+ def _save_storages(importer, obj):
11
+ serialized_storages = []
12
+ serialized_dtypes = []
13
+
14
+ importer = importer if isinstance(importer, torch.package.PackageImporter) else None
15
+ importers: Importer
16
+ if importer is not None:
17
+ importers = OrderedImporter(importer, sys_importer)
18
+ else:
19
+ importers = sys_importer
20
+
21
+ def persistent_id(obj):
22
+ if torch.is_storage(obj) or isinstance(obj, torch.storage.TypedStorage):
23
+ if isinstance(obj, torch.storage.TypedStorage):
24
+ # TODO: Once we decide to break serialization FC, we can
25
+ # remove this case
26
+ storage = obj._untyped_storage
27
+ dtype = obj.dtype
28
+ else:
29
+ storage = obj
30
+ dtype = torch.uint8
31
+
32
+ serialized_storages.append(obj)
33
+ serialized_dtypes.append(dtype)
34
+ return ("storage", len(serialized_storages) - 1)
35
+
36
+ if hasattr(obj, "__reduce_deploy__"):
37
+ if _serialized_reduces.get(id(obj)) is None:
38
+ _serialized_reduces[id(obj)] = (
39
+ "reduce_deploy",
40
+ id(obj),
41
+ *obj.__reduce_deploy__(importers),
42
+ )
43
+ return _serialized_reduces[id(obj)]
44
+
45
+ return None
46
+
47
+ # Write the pickle data for `obj`
48
+ data_buf = io.BytesIO()
49
+ pickler = create_pickler(data_buf, importers)
50
+ pickler.persistent_id = persistent_id
51
+ pickler.dump(obj)
52
+ data_value = data_buf.getvalue()
53
+ return (
54
+ data_value,
55
+ serialized_storages,
56
+ serialized_dtypes,
57
+ importer.zip_reader if importer else None,
58
+ )
59
+
60
+
61
+ def _load_storages(id, zip_reader, obj_bytes, serialized_storages, serialized_dtypes):
62
+ def persistent_load(saved_id):
63
+ assert isinstance(saved_id, tuple)
64
+ typename = _maybe_decode_ascii(saved_id[0])
65
+ data = saved_id[1:]
66
+
67
+ if typename == "storage":
68
+ # TODO: Once we decide to break serialization FC, we can
69
+ # stop wrapping with TypedStorage
70
+ storage = serialized_storages[data[0]]
71
+ dtype = serialized_dtypes[data[0]]
72
+ return torch.storage.TypedStorage(
73
+ wrap_storage=storage.untyped(), dtype=dtype
74
+ )
75
+
76
+ if typename == "reduce_deploy":
77
+ reduce_id, func, args = data
78
+ if reduce_id not in _loaded_reduces:
79
+ _loaded_reduces[reduce_id] = func(_raw_packages[zip_reader], *args)
80
+ return _loaded_reduces[reduce_id]
81
+
82
+ return None
83
+
84
+ importer: Importer
85
+ if zip_reader is not None:
86
+ importer = OrderedImporter(_get_package(zip_reader), sys_importer)
87
+ else:
88
+ importer = sys_importer
89
+
90
+ unpickler = PackageUnpickler(importer, io.BytesIO(obj_bytes))
91
+ unpickler.persistent_load = persistent_load # type: ignore[method-assign]
92
+ result = _deploy_objects[id] = unpickler.load()
93
+ return result
94
+
95
+
96
+ def _get_package(zip_reader):
97
+ if zip_reader not in _raw_packages:
98
+ _raw_packages[zip_reader] = PackageImporter(zip_reader)
99
+ return _raw_packages[zip_reader]
100
+
101
+
102
+ _raw_packages: dict = {}
103
+ _deploy_objects: dict = {}
104
+ _serialized_reduces: dict = {}
105
+ _loaded_reduces: dict = {}
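The `_save_storages` / `_load_storages` pair above is a pickle round trip keyed by `persistent_id`: tensor storages are pulled out of the pickle stream on save and re-wrapped as `TypedStorage` on load. A minimal single-process sketch of that round trip, assuming no `PackageImporter` is involved (so `zip_reader` stays `None`); this is illustrative only, not how torch::deploy drives these helpers:

    import torch
    from torch._deploy import _load_storages, _save_storages

    obj = {"weight": torch.randn(4)}
    # Pickle the object graph; tensor storages are intercepted via persistent_id.
    data, storages, dtypes, zip_reader = _save_storages(None, obj)
    # Rebuild the object; index 0 is just this sketch's arbitrary cache id.
    restored = _load_storages(0, zip_reader, data, storages, dtypes)
    assert torch.equal(restored["weight"], obj["weight"])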
venv/lib/python3.10/site-packages/torch/_guards.py ADDED
@@ -0,0 +1,879 @@
1
+ from __future__ import annotations
2
+
3
+ import contextlib
4
+
5
+ import dataclasses
6
+ import enum
7
+ import functools
8
+ import logging
9
+ import threading
10
+ import traceback
11
+ import unittest.mock
12
+ import weakref
13
+ from abc import ABC, abstractmethod
14
+ from contextlib import contextmanager
15
+ from typing import (
16
+ Any,
17
+ Callable,
18
+ Dict,
19
+ Generic,
20
+ List,
21
+ NamedTuple,
22
+ Optional,
23
+ Set,
24
+ Tuple,
25
+ TYPE_CHECKING,
26
+ TypeVar,
27
+ )
28
+
29
+ import torch
30
+ from torch.utils import _pytree as pytree
31
+ from torch.utils._traceback import CapturedTraceback
32
+ from torch.utils.weak import WeakTensorKeyDictionary
33
+
34
+ log = logging.getLogger(__name__)
35
+
36
+
37
+ if TYPE_CHECKING:
38
+ # Import the following modules during type checking to enable code intelligence features,
39
+ # such as auto-completion in tools like pylance, even when these modules are not explicitly
40
+ # imported in user code.
41
+
42
+ import sympy
43
+
44
+
45
+ """
46
+ torch._guards is the definitional source of truth for general purpose guard structures.
47
+
48
+ An important thing to keep in mind here is the preservation of layering. There should be no dynamo notions,
49
+ and no guard installation notions here.
50
+ """
51
+
52
+
53
+ class CompileId(NamedTuple):
54
+ frame_id: int
55
+ # This id is per-frame, and counts how many times we've compiled this
56
+ # frame. This could have been a global id but having this be per-frame
57
+ # gives you a better intuitive sense for how many recompiles have occurred
58
+ # so far.
59
+ frame_compile_id: int
60
+ # TODO: consider also tracking the recompilation count
61
+
62
+ def __str__(self):
63
+ return f"{self.frame_id}/{self.frame_compile_id}"
64
+
65
+
66
+ class TraceId(NamedTuple):
67
+ compile_id: CompileId
68
+ # This starts off as 0, and every time we restart analysis it goes
69
+ # up by one
70
+ attempt: int
71
+
72
+ def __str__(self):
73
+ if self.attempt == 0:
74
+ return str(self.compile_id)
75
+ else:
76
+ return f"{self.compile_id}_{self.attempt}"
77
+
78
+
79
+ class GuardSource(enum.Enum):
80
+ LOCAL = 0
81
+ GLOBAL = 1
82
+ LOCAL_NN_MODULE = 2
83
+ GLOBAL_NN_MODULE = 3
84
+ CONSTANT = 4
85
+ RANDOM_VALUE = 5
86
+ SHAPE_ENV = 6
87
+ LOCAL_FSDP_MODULE = 7
88
+ GLOBAL_FSDP_MODULE = 8
89
+ BACKWARD_STATE = 9
90
+ EPHEMERAL = 10
91
+ SYNTHETIC_LOCAL = 11
92
+
93
+ def is_fsdp_module(self) -> bool:
94
+ return self in (GuardSource.GLOBAL_FSDP_MODULE, GuardSource.LOCAL_FSDP_MODULE)
95
+
96
+ def is_nn_module(self) -> bool:
97
+ return (
98
+ self
99
+ in (
100
+ GuardSource.GLOBAL_NN_MODULE,
101
+ GuardSource.LOCAL_NN_MODULE,
102
+ )
103
+ or self.is_fsdp_module()
104
+ )
105
+
106
+ def is_local(self):
107
+ return self in (
108
+ GuardSource.LOCAL,
109
+ GuardSource.LOCAL_NN_MODULE,
110
+ GuardSource.LOCAL_FSDP_MODULE,
111
+ )
112
+
113
+
114
+ """
115
+ Base class for a "GuardBuilder" role.
116
+
117
+ The GuardBuilderBase role is to represent a scope within which to build a guard. The name is a little
118
+ confusing, as it's not a builder; the name is kept to avoid a lot of renames and to preserve the original reference
119
+ to torchdynamo's GuardBuilder.
120
+
121
+ Note: create_fn is invoked with a GuardBuilderBase and a Guard. A GuardBuilder is chosen based
122
+ on GuardSource's select function.
123
+
124
+ There is value in keeping this GuardBuilderBase empty to keep layering clean.
125
+ """
126
+
127
+
128
+ class GuardBuilderBase:
129
+ pass
130
+
131
+
132
+ class ShapeGuard(NamedTuple):
133
+ expr: sympy.Expr
134
+ stack: CapturedTraceback
135
+
136
+
137
+ @dataclasses.dataclass
138
+ class Guard:
139
+ # originating_source is the source that called the make_guard method to
140
+ # construct this guard object. The property name specifies what exactly it
141
+ # is the guard is guarding on. The meaning of the name is dependent on the
142
+ # create_fn; you must look at the use-site inside create_fn to know what
143
+ # name means.
144
+ #
145
+ # That being said, although you might think this is just a "name", name is
146
+ # usually an arbitrary Python expression that will be evaluated with all
147
+ # globals (and locals, if you create a LOCAL guard) to extract the Python
148
+ # object that we want to perform guard tests on. This evaluation
149
+ # typically happens in GuardBuilder.eval. In these cases, name is
150
+ # typically produced by originating_source.name() (not to be confused with
151
+ # GuardSource - the property source).
152
+ #
153
+ # Occasionally, name is not a valid Python expression; sometimes
154
+ # it is meaningless. Example create_fns that are like this include
155
+ # GRAD_MODE and SHAPE_ENV.
156
+ originating_source: Source
157
+ create_fn: Callable[[GuardBuilderBase, Guard], None]
158
+
159
+ # Export only. These values are written to at time of guard check_fn creation.
160
+ guard_types: Optional[List[str]] = None
161
+ code_list: Optional[List[str]] = None
162
+ obj_weakref: Optional[object] = None
163
+ guarded_class_weakref: Optional[type] = None
164
+
165
+ stack: Optional[CapturedTraceback] = None
166
+ user_stack: Optional[traceback.StackSummary] = None
167
+ _hash: Optional[int] = None
168
+
169
+ def __hash__(self):
170
+ if self._hash is None:
171
+ self._hash = hash((self.name, self.source, id(self.create_fn)))
172
+ return self._hash
173
+
174
+ def sort_key(self):
175
+ return (
176
+ self.source.value if self.source else -1,
177
+ len(self.name),
178
+ self.name,
179
+ self.inner_create_fn().__code__.co_firstlineno,
180
+ )
181
+
182
+ def __lt__(self, other):
183
+ return self.sort_key() < other.sort_key()
184
+
185
+ def inner_create_fn(self):
186
+ if isinstance(self.create_fn, functools.partial):
187
+ return self.create_fn.func
188
+ else:
189
+ return self.create_fn
190
+
191
+ @property
192
+ def name(self) -> str:
193
+ return self.originating_source.name()
194
+
195
+ @property
196
+ def source(self) -> GuardSource:
197
+ return self.originating_source.guard_source()
198
+
199
+ @staticmethod
200
+ def weakref_to_str(obj_weakref):
201
+ """
202
+ This is a workaround of a Python weakref bug.
203
+
204
+ `obj_weakref` is an instance returned by `weakref.ref`,
205
+ `str(obj_weakref)` is buggy if the original obj overrides __getattr__, e.g:
206
+
207
+ class MyConfig(dict):
208
+ def __getattr__(self, x):
209
+ return self[x]
210
+
211
+ obj = MyConfig(offset=5)
212
+ obj_weakref = weakref.ref(obj)
213
+ str(obj_weakref) # raise error: KeyError: '__name__'
214
+ """
215
+ if isinstance(obj_weakref, weakref.ReferenceType):
216
+ obj = obj_weakref()
217
+ if obj is not None:
218
+ return f"<weakref at {hex(id(obj_weakref))}; to '{obj.__class__.__name__}' at {hex(id(obj))}>"
219
+ else:
220
+ return f"<weakref at {hex(id(obj_weakref))}; dead>"
221
+ else:
222
+ return str(obj_weakref)
223
+
224
+ def __repr__(self):
225
+ s = f"""
226
+ {self.source.name.lower() if self.source else ""} {repr(self.name)} {self.inner_create_fn().__name__}
227
+ {{
228
+ 'guard_types': {self.guard_types},
229
+ 'code': {self.code_list},
230
+ 'obj_weakref': {self.weakref_to_str(self.obj_weakref)}
231
+ 'guarded_class': {self.guarded_class_weakref}
232
+ }}
233
+ """
234
+ return s
235
+
236
+ def __str__(self):
237
+ output = f"Name: {repr(self.name)}\n"
238
+ source = self.source.name.lower() if self.source else ""
239
+ output += f" Source: {source}\n"
240
+ output += f" Create Function: {self.inner_create_fn().__name__}\n"
241
+ output += f" Guard Types: {self.guard_types}\n"
242
+ output += f" Code List: {self.code_list}\n"
243
+ output += f" Object Weakref: {self.weakref_to_str(self.obj_weakref)}\n"
244
+ output += f" Guarded Class Weakref: {self.guarded_class_weakref}\n"
245
+ return output
246
+
247
+ def create(self, builder: GuardBuilderBase):
248
+ try:
249
+ return self.create_fn(builder, self)
250
+ except Exception:
251
+ log.error("Error while creating guard:\n%s", str(self).rstrip())
252
+ if self.stack:
253
+ log.error("Created at:\n%s", "".join(self.stack.format()[-4:]).rstrip())
254
+ raise
255
+
256
+ def is_nn_module(self):
257
+ return self.source.is_nn_module()
258
+
259
+ def is_fsdp_module(self):
260
+ return self.source.is_fsdp_module()
261
+
262
+ def is_local(self):
263
+ return self.source.is_local()
264
+
265
+ def set_export_info(self, guard_type, guarded_class, code_list, obj_weakref):
266
+ if not self.guard_types:
267
+ self.guard_types = list()
268
+
269
+ self.guard_types.append(guard_type)
270
+
271
+ assert self.guarded_class_weakref in (
272
+ guarded_class,
273
+ None,
274
+ ), "Guarded class id must be identical, or None"
275
+ self.guarded_class_weakref = guarded_class
276
+
277
+ if not self.code_list:
278
+ self.code_list = code_list
279
+ else:
280
+ self.code_list.extend(code_list)
281
+
282
+ assert self.obj_weakref in (
283
+ obj_weakref,
284
+ None,
285
+ ), "Guarded object must be identical, or None"
286
+ self.obj_weakref = obj_weakref
287
+
288
+
289
+ T = TypeVar("T")
290
+
291
+ """
292
+ Parent structure for guard env expressions.
293
+ A GuardEnvExpr can have any subtype.
294
+ Note: All subtypes must be handled exhaustively in
295
+ torch._dynamo.guards._parse_guard_env_guards to avoid a RuntimeError.
296
+ """
297
+
298
+
299
+ @dataclasses.dataclass
300
+ class GuardEnvExpr:
301
+ pass
302
+
303
+
304
+ """
305
+ A class representing a pair of duplicate inputs.
306
+ input_pos_a and input_pos_b are input positions we have deduped.
307
+ """
308
+
309
+
310
+ @dataclasses.dataclass
311
+ class DuplicateInputs(GuardEnvExpr):
312
+ input_source_a: Source
313
+ input_source_b: Source
314
+
315
+ def __post_init__(self):
316
+ assert self.input_source_a != self.input_source_b
317
+
318
+
319
+ """
320
+ Checkpointable is an interface for driving state snapshotting, left purposely vague for now.
321
+
322
+ copy_graphstate() -> T, a somewhat legacy name, is expected to emit a snapshot of any type that
323
+ can also be taken in at restore_graphstate(T) calls.
324
+
325
+ When to snapshot is, at the moment, an implementation detail of upstream callers. Checkpointable
326
+ does not provide any guarantees around consistency, idempotency, or safety of calling its APIs, yet.
327
+
328
+ In the future, it will have a closer coupling to a generic Checkpoint management system.
329
+ """
330
+
331
+
332
+ class Checkpointable(ABC, Generic[T]):
333
+ @abstractmethod
334
+ def copy_graphstate(self) -> T:
335
+ ...
336
+
337
+ @abstractmethod
338
+ def restore_graphstate(self, state: T):
339
+ ...
340
+
341
+
342
+ class GuardsCheckpointState:
343
+ """
344
+ The GuardCheckpointState - it is the T of Checkpointable[T] for GuardsContext
345
+ """
346
+
347
+ dynamo_guards: Set[Guard] = set()
348
+
349
+ def __init__(self, dynamo_guards):
350
+ self.dynamo_guards = dynamo_guards
351
+
352
+ def diff(self, other):
353
+ """
354
+ Produces a delta against another GuardsCheckpointState.
355
+
356
+ Returns None if no delta is found, otherwise, return a set() of mismatched
357
+ Guard type objects.
358
+ """
359
+ r = self.dynamo_guards.difference(other.dynamo_guards)
360
+ if len(r) == 0:
361
+ return None
362
+ return r
363
+
364
+ def __eq__(self, other):
365
+ return self.diff(other) is None
366
+
367
+
368
+ class ModuleContextCheckpointState:
369
+ nn_modules: Dict[str, torch.nn.Module] = {}
370
+
371
+ def __init__(self, nn_modules):
372
+ self.nn_modules = nn_modules
373
+
374
+ def diff(self, other):
375
+ """
376
+ Produces a delta against another ModuleContextCheckpointState.
377
+
378
+ Returns None if no delta is found, otherwise, return a set() of mismatched
379
+ module key names.
380
+ """
381
+ r = set(self.nn_modules.keys()).difference(set(other.nn_modules.keys()))
382
+ if len(r) == 0:
383
+ return None
384
+ return r
385
+
386
+ def __eq__(self, other):
387
+ return self.diff(other) is None
388
+
389
+
390
+ class ModuleContext(Checkpointable[ModuleContextCheckpointState]):
391
+ def __init__(self):
392
+ self.nn_modules: Dict[str, Any] = {}
393
+
394
+ def copy_graphstate(self):
395
+ return ModuleContextCheckpointState(dict(self.nn_modules))
396
+
397
+ def restore_graphstate(self, state):
398
+ assert isinstance(state, ModuleContextCheckpointState)
399
+ self.nn_modules = state.nn_modules
400
+
401
+
402
+ class GlobalContextCheckpointState:
403
+ global_state: Dict[str, Tuple[Callable, ...]] = {}
404
+
405
+ def __init__(self, global_states):
406
+ self.global_state = global_states
407
+
408
+ def diff(self, other):
409
+ """
410
+ Produces a delta against another GlobalContextCheckpointState.
411
+
412
+ Returns None if no delta is found, otherwise, return a set() of mismatched
413
+ global key names.
414
+ """
415
+ r = set(self.global_state.keys()).difference(set(other.global_state.keys()))
416
+ if len(r) == 0:
417
+ return None
418
+ return r
419
+
420
+ def __eq__(self, other):
421
+ return self.diff(other) is None
422
+
423
+
424
+ class GlobalContext(Checkpointable[GlobalContextCheckpointState]):
425
+ """
426
+ This keeps track of the global torch state during tracing of a function.
427
+ For example, torch.is_grad_enabled.
428
+ """
429
+
430
+ _supported_global_states = {
431
+ "grad_enabled",
432
+ "torch_function_enabled",
433
+ "autocast_enabled",
434
+ "autocast_cpu_enabled",
435
+ "autocast_gpu_dtype",
436
+ "autocast_cpu_dtype",
437
+ "autocast_cache_enabled",
438
+ }
439
+
440
+ def __init__(self):
441
+ self.global_state: Dict[str, Tuple[Callable, ...]] = {}
442
+
443
+ def copy_graphstate(self):
444
+ return GlobalContextCheckpointState(dict(self.global_state))
445
+
446
+ def restore_graphstate(self, state):
447
+ assert isinstance(state, GlobalContextCheckpointState)
448
+ self.global_state = state.global_state
449
+ assert (
450
+ len(self.global_state) == len(self._supported_global_states)
451
+ and set(self.global_state.keys()) == self._supported_global_states
452
+ ), "Global state mismatch"
453
+ for func, args in self.global_state.values():
454
+ func(args)
455
+
456
+
457
+ """
458
+ A GuardsContext is a checkpointable representation of all the guards in the current tracing
459
+ context. Its lifecycle is bound 1:1 to the tracing context, and it should never be instantiated
460
+ directly outside of it. For passing around internal state representations of this object,
461
+ prefer to extract them with copy_graphstate to produce a GuardsCheckpointState.
462
+ """
463
+
464
+
465
+ # Like a Set[Guard] but will record the user stack on all guards at the
466
+ # time they were installed at their destination
467
+ class GuardsSet:
468
+ def __init__(self, inner=None):
469
+ if inner is None:
470
+ inner = set()
471
+ self.inner = inner
472
+
473
+ def __iter__(self):
474
+ return iter(self.inner)
475
+
476
+ def __len__(self):
477
+ return len(self.inner)
478
+
479
+ # Subtraction along with bool is typically used to determine the delta of
480
+ # added guards between checkpoints for higher order ops
481
+ def __sub__(self, other):
482
+ return GuardsSet(self.inner - other.inner)
483
+
484
+ def __bool__(self):
485
+ return bool(self.inner)
486
+
487
+ def add(self, guard: Guard, *, collect_debug_stack=True, skip=0):
488
+ if guard in self.inner:
489
+ return
490
+ if collect_debug_stack:
491
+ if guard.stack is None:
492
+ guard.stack = CapturedTraceback.extract(skip=1 + skip)
493
+ if guard.user_stack is None:
494
+ guard.user_stack = TracingContext.extract_stack()
495
+ self.inner.add(guard)
496
+
497
+ def update(self, *others: Set[Guard]):
498
+ for o in others:
499
+ for g in o:
500
+ self.add(g, skip=1)
501
+
502
+ def remove_guards_with_source(self, source):
503
+ """Delete all guards with a given source"""
504
+ self.inner = {g for g in self.inner if g.originating_source != source}
505
+
506
+
507
+ class GuardsContext(Checkpointable[GuardsCheckpointState]):
508
+ def __init__(self):
509
+ self.dynamo_guards: GuardsSet = GuardsSet()
510
+ self.aotautograd_guards: List[GuardEnvExpr] = []
511
+
512
+ def copy_graphstate(self):
513
+ return GuardsCheckpointState(set(self.dynamo_guards.inner))
514
+
515
+ def restore_graphstate(self, state):
516
+ # NB: "steals" the passed in state
517
+ assert isinstance(state, GuardsCheckpointState)
518
+ self.dynamo_guards = GuardsSet(state.dynamo_guards)
519
+
520
+
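The Checkpointable pattern above is what makes restarted analysis cheap: callers snapshot the guard state, speculate, and roll back on failure. A small sketch of the intended call pattern, using only the classes defined in this file:

    guards = GuardsContext()
    snapshot = guards.copy_graphstate()           # GuardsCheckpointState
    # ... speculative tracing may add guards to guards.dynamo_guards here ...
    guards.restore_graphstate(snapshot)           # drop anything added since the snapshot
    assert guards.copy_graphstate() == snapshot   # diff() reports no mismatched guards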
521
+ _TLS = threading.local()
522
+
523
+ """
524
+ TracingContext is the source of truth for all currently accumulated information
525
+ needed to trace. Its lifecycle is kept 1:1 when using TorchDynamo, but other systems
526
+ are open to managing their own TracingContext with that in mind.
527
+
528
+ The purpose of TracingContext is not to be a dumping ground, or god object, but rather to avoid
529
+ having to plumb complex subsystems across multiple verticals.
530
+
531
+ Ex: A common example is guard accumulation between dynamo, shape_env, aot_autograd, and inductor.
532
+ Accessing the current tracing context via
533
+ TracingContext.get() allows users to accumulate their own guards for processing, without needing to know how
534
+ to plumb objects back up to where frame interpretation happened.
535
+
536
+ Note that you can end up with multiple TracingContext for a single compilation
537
+ of a frame, as we reset the TracingContext whenever we restart analysis.
538
+ CompileContext is a more overarching context that encompasses multiple restarts.
539
+ """
540
+
541
+
542
+ class CompileContext:
543
+ @staticmethod
544
+ def get() -> CompileContext:
545
+ assert _TLS.compile_context is not None
546
+ return _TLS.compile_context
547
+
548
+ @staticmethod
549
+ def try_get() -> Optional[CompileContext]:
550
+ return getattr(_TLS, "compile_context", None)
551
+
552
+ def __init__(self, compile_id):
553
+ assert compile_id is None or isinstance(compile_id, CompileId)
554
+ self.compile_id: Optional[CompileId] = compile_id
555
+ self.attempt = 0
556
+
557
+ @staticmethod
558
+ def current_compile_id():
559
+ self = CompileContext.try_get()
560
+ if self is None:
561
+ return None
562
+ return self.compile_id
563
+
564
+ @staticmethod
565
+ def current_trace_id():
566
+ self = CompileContext.try_get()
567
+ if self is None:
568
+ return None
569
+ if self.compile_id is None:
570
+ return None
571
+ return TraceId(self.compile_id, self.attempt)
572
+
573
+
574
+ class TracingContext:
575
+ """
576
+ Provides the currently installed TracingContext, or None.
577
+
578
+ Note that it is a staticmethod, and invocations outside of `with tracing()` (see below) are valid but
579
+ will return None.
580
+ """
581
+
582
+ @staticmethod
583
+ def try_get() -> Optional[TracingContext]:
584
+ return getattr(_TLS, "tracing_context", None)
585
+
586
+ @staticmethod
587
+ def get() -> TracingContext:
588
+ if ctx := TracingContext.try_get():
589
+ return ctx
590
+ raise RuntimeError(
591
+ "TracingContext.get() must be called within an ongoing trace."
592
+ )
593
+
594
+ def __init__(self, fake_mode):
595
+ self.guards_context = GuardsContext()
596
+ self.module_context = ModuleContext()
597
+ self.global_context = GlobalContext()
598
+ self.fake_mode = fake_mode
599
+ self.frame_summary_stack = []
600
+ # This is morally part of frame_summary_stack, but it is kept separate
601
+ # for clarity. As we process a frame, this variable gets updated
602
+ # to keep track of which line of the function we are on. When we make a
603
+ # function call, this gets cleared and the frame location is pushed
604
+ # to frame_summary_stack (prepping this variable for the inner frame's
605
+ # progress)
606
+ self.loc_in_frame = None
607
+ # this is only set after aot_autograd
608
+ self.fw_metadata = None
609
+ self.params_flat = None
610
+ # this is for extended return calling convention from backend
611
+ # compiler to aot_autograd
612
+ # Per output, what the compiler specified stride of the output is,
613
+ # or None if no stride is known. This is always the HINT, it
614
+ # is never a SymInt (it would be better if it was a SymInt, but
615
+ # I can't conveniently get this from Inductor atm. Also, be
616
+ # careful not to accidentally induce guards on the SymInt if
617
+ # you ever do change this in aot_autograd.py; you should check
618
+ # on permutations preferentially.)
619
+ self.output_strides: Optional[List[Optional[List[int]]]] = None
620
+ # When this is True, whenever we encounter an int in Dynamo tracing,
621
+ # we will (1) force unspec it and (2) force it as a size-like unbacked
622
+ # integer. This is currently used when processing certain lists of
623
+ # ints that are known to be size-like and may have 0/1 entries that we
624
+ # must not specialize on.
625
+ self.force_unspec_int_unbacked_size_like = False
626
+ # See note [Tensor Fakification and Symbol Caching]
627
+ self.tensor_to_context = WeakTensorKeyDictionary()
628
+
629
+ # If this is true, AOT Autograd will return output Fake Tensors with appropriate
630
+ # meta on the first invocation
631
+ # see note: [Returning Fake Tensors on First AOT Autograd Call]
632
+ self.fakify_first_call = False
633
+
634
+ def clear(self):
635
+ # Look at the note in output_graph.py in function `save_global_state`
636
+ # for the context on clearing global context.
637
+ self.global_context.global_state = {}
638
+
639
+ @staticmethod
640
+ @contextmanager
641
+ def patch(**kwargs):
642
+ prior = {}
643
+ ctx = TracingContext.get()
644
+
645
+ for key in kwargs.keys():
646
+ # KeyError on invalid entry
647
+ prior[key] = getattr(ctx, key)
648
+ for key, val in kwargs.items():
649
+ setattr(ctx, key, val)
650
+ try:
651
+ yield
652
+ finally:
653
+ for key, val in prior.items():
654
+ setattr(ctx, key, val)
655
+
656
+ @staticmethod
657
+ def extract_stack():
658
+ self = TracingContext.try_get()
659
+ if self is None:
660
+ return traceback.StackSummary()
661
+ stack = self.frame_summary_stack
662
+ if self.loc_in_frame is not None:
663
+ stack = stack + [self.loc_in_frame]
664
+ return traceback.StackSummary.from_list(stack)
665
+
666
+ # Call this when you want to call into some code that isn't necessarily
667
+ # associated with the current frame state
668
+ @staticmethod
669
+ @contextlib.contextmanager
670
+ def clear_frame():
671
+ tc = TracingContext.get()
672
+ with unittest.mock.patch.object(
673
+ tc, "frame_summary_stack", []
674
+ ), unittest.mock.patch.object(tc, "loc_in_frame", None):
675
+ try:
676
+ yield
677
+ except Exception as e:
678
+ # Prevent real_stack from getting attached
679
+ #
680
+ # The invariant is that if an Exception has real_stack, we've
681
+ # appropriately attached a user stack and we no longer need to
682
+ # attach anything. Because we cannot conveniently interpose
683
+ # when an exception is thrown, we instead interpose everywhere
684
+ # we set what the user stack is set (using the context
685
+ # manager). However, our compiler stack does "tail calls"
686
+ # (when it calls into user compiler), at which point the
687
+ # parent exception frames would incorrectly attach an
688
+ # incorrect frame.
689
+ #
690
+ # However, if, somehow, someone raised an exception with this
691
+ # scope that had a stack (for example, because they are
692
+ # restoring the user stack state appropriately as they process
693
+ # node by node), we should respect it. Thus, we cannot
694
+ # unconditionally set None.
695
+ if not hasattr(e, "real_stack"):
696
+ e.real_stack = None # type: ignore[attr-defined]
697
+ raise
698
+
699
+ @staticmethod
700
+ @contextlib.contextmanager
701
+ def current_frame(frame_summary):
702
+ # frame_summary can be None to solely take advantage of real_stack
703
+ # attachment to thrown exceptions
704
+ tc = TracingContext.get()
705
+ if frame_summary is not None:
706
+ tc.frame_summary_stack.append(frame_summary)
707
+ old = tc.loc_in_frame
708
+ tc.loc_in_frame = None
709
+ try:
710
+ yield
711
+ except Exception as e:
712
+ if not hasattr(e, "real_stack"):
713
+ e.real_stack = tc.extract_stack() # type: ignore[attr-defined]
714
+ raise
715
+ finally:
716
+ if frame_summary is not None:
717
+ tc.frame_summary_stack.pop()
718
+ tc.loc_in_frame = old
719
+
720
+ @staticmethod
721
+ @contextlib.contextmanager
722
+ def report_output_strides():
723
+ tc = TracingContext.try_get()
724
+ if tc is None:
725
+ yield None
726
+ return
727
+ old_output_strides = tc.output_strides
728
+ tc.output_strides = []
729
+ try:
730
+ yield tc.output_strides
731
+ finally:
732
+ tc.output_strides = old_output_strides
733
+
734
+ @staticmethod
735
+ def set_current_loc(filename, lineno, frame_name):
736
+ TracingContext.get().loc_in_frame = traceback.FrameSummary(
737
+ filename, lineno, frame_name
738
+ )
739
+
740
+
741
+ @contextmanager
742
+ def compile_context(context: CompileContext):
743
+ old_context = getattr(_TLS, "compile_context", None)
744
+ _TLS.compile_context = context
745
+ try:
746
+ yield context
747
+ finally:
748
+ _TLS.compile_context = old_context
749
+
750
+
751
+ @contextmanager
752
+ def tracing(context: Optional[TracingContext]):
753
+ """
754
+ This function installs the passed in tracing context as a dynamic scoped
755
+ global variable.
756
+
757
+ Calls to TracingContext.get() while not under a `with tracing()` context
758
+ will return None.
759
+ """
760
+ old_context = getattr(_TLS, "tracing_context", None)
761
+ _TLS.tracing_context = context
762
+ try:
763
+ yield context
764
+ except Exception as e:
765
+ if not hasattr(e, "real_stack") and context is not None:
766
+ e.real_stack = context.extract_stack() # type: ignore[attr-defined]
767
+ raise
768
+ finally:
769
+ if (
770
+ context is not None
771
+ and context.fake_mode is not None
772
+ and context.fake_mode.shape_env is not None
773
+ ):
774
+ context.fake_mode.shape_env.cleanup()
775
+ _TLS.tracing_context = old_context
776
+
777
+
778
+ # Subclasses can be found in torch/_dynamo/source.py
779
+ # TODO(voz): Consider a toplevel torch/_source.py
780
+ @dataclasses.dataclass(frozen=True)
781
+ class Source:
782
+ def is_dict_key(self):
783
+ return False
784
+
785
+ def is_ephemeral(self):
786
+ return False
787
+
788
+ def reconstruct(self, codegen):
789
+ raise NotImplementedError()
790
+
791
+ def guard_source(self) -> GuardSource:
792
+ raise NotImplementedError()
793
+
794
+ def name(self) -> str:
795
+ raise NotImplementedError()
796
+
797
+ def make_guard(self, fn) -> Guard:
798
+ if self.guard_source() is GuardSource.CONSTANT:
799
+ raise NotImplementedError()
800
+ return Guard(self, fn)
801
+
802
+ def is_nn_module(self) -> bool:
803
+ return self.guard_source().is_nn_module()
804
+
805
+ def subguards_allowed(self):
806
+ """True if you can guard on attributes of this"""
807
+ return self.guard_source() != GuardSource.SYNTHETIC_LOCAL
808
+
809
+
810
+ # Subclasses can be found in torch/_dynamo/source.py
811
+ @dataclasses.dataclass(frozen=True)
812
+ class ChainedSource(Source):
813
+ base: Source
814
+
815
+ def is_dict_key(self):
816
+ # Recurse until you either hit a ConstDictKey or a Source
817
+ return self.base.is_dict_key()
818
+
819
+ def is_ephemeral(self):
820
+ return self.base.is_ephemeral()
821
+
822
+
823
+ def detect_fake_mode(inputs: Any = None):
824
+ """
825
+ Attempts to "detect" what the current fake mode is. If there is one ambiently
826
+ available from TracingContext, we preferentially use that. Otherwise, we
827
+ heuristically detect the fake mode via the following sources, in order of
828
+ priority:
829
+
830
+ - Currently active fake mode on stack
831
+ - Fake mode associated with passed in tensors (inputs does not
832
+ have to be flattened)
833
+ """
834
+ from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode
835
+
836
+ fake_modes = []
837
+
838
+ if context := TracingContext.try_get():
839
+ fake_mode = context.fake_mode
840
+ if fake_mode is not None:
841
+ fake_modes.append((fake_mode, "tracing context", 0))
842
+
843
+ from torch.utils._python_dispatch import _get_current_dispatch_mode_stack
844
+
845
+ for i, m in enumerate(reversed(_get_current_dispatch_mode_stack())):
846
+ if isinstance(m, FakeTensorMode):
847
+ fake_modes.append((m, "active fake mode", i))
848
+
849
+ flat_inputs = pytree.tree_leaves(inputs)
850
+ for i, flat_input in enumerate(flat_inputs):
851
+ if isinstance(flat_input, FakeTensor):
852
+ fake_modes.append((flat_input.fake_mode, "fake tensor input", i))
853
+
854
+ if fake_modes:
855
+ fake_mode, desc1, i1 = fake_modes[0]
856
+ for m, desc2, i2 in fake_modes[1:]:
857
+ assert fake_mode is m, (
858
+ f"fake mode ({fake_mode}) from {desc1} {i1} doesn't match mode ({m}) from {desc2} {i2}\n\n"
859
+ f"fake mode from {desc1} {i1} allocated at:\n{fake_mode.stack}\n"
860
+ f"fake mode from {desc2} {i2} allocated at:\n{m.stack}"
861
+ )
862
+ return fake_mode
863
+ else:
864
+ return None
865
+
866
+
867
+ def active_fake_mode():
868
+ """
869
+ Inspects the dispatch mode stack for an active fake mode and returns it.
870
+ Returns None if no fake mode is active.
871
+ """
872
+ from torch._subclasses.fake_tensor import FakeTensorMode
873
+ from torch.utils._python_dispatch import _get_current_dispatch_mode_stack
874
+
875
+ for _, m in enumerate(reversed(_get_current_dispatch_mode_stack())):
876
+ if isinstance(m, FakeTensorMode):
877
+ return m
878
+
879
+ return None
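Taken together, `tracing()` installs a TracingContext as the ambient, dynamically scoped state for the current trace, and `detect_fake_mode()` consults that context before falling back to the dispatch mode stack or the inputs. A minimal sketch of that lookup order, assuming a default FakeTensorMode (no ShapeEnv, so no cleanup is triggered on exit):

    import torch
    from torch._guards import TracingContext, detect_fake_mode, tracing
    from torch._subclasses.fake_tensor import FakeTensorMode

    fake_mode = FakeTensorMode()
    with tracing(TracingContext(fake_mode)):
        # Anywhere under this scope the ambient context is retrievable ...
        assert TracingContext.get().fake_mode is fake_mode
        # ... and detect_fake_mode() prefers it over inputs or the mode stack.
        assert detect_fake_mode() is fake_mode
    assert TracingContext.try_get() is None  # the context is popped on exit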
venv/lib/python3.10/site-packages/torch/_jit_internal.py ADDED
@@ -0,0 +1,1510 @@
1
+ """
2
+ The weak_script annotation needs to be here instead of inside torch/jit/ so it
3
+ can be used in other places in torch/ (namely torch.nn) without running into
4
+ circular dependency problems
5
+ """
6
+
7
+ import ast
8
+ import builtins
9
+ import collections
10
+ import contextlib
11
+ import enum
12
+ import inspect
13
+ import io
14
+ import pickle
15
+ import sys
16
+ import threading
17
+ import types
18
+ import typing
19
+ import warnings
20
+ import weakref
21
+ from textwrap import dedent
22
+ from typing import ( # noqa: F401
23
+ Any,
24
+ Callable,
25
+ Dict,
26
+ Final,
27
+ ForwardRef,
28
+ Generic,
29
+ get_args, # new in 3.8
30
+ get_origin, # new in 3.8
31
+ List,
32
+ Optional,
33
+ Tuple,
34
+ Type,
35
+ TypeVar,
36
+ Union,
37
+ )
38
+
39
+ import torch
40
+
41
+ # This is needed. `torch._jit_internal` is imported before `torch.distributed.__init__`.
42
+ # Explicitly ask to import `torch.distributed.__init__` first.
43
+ # Otherwise, "AttributeError: module 'torch' has no attribute 'distributed'" is raised.
44
+ import torch.distributed.rpc
45
+ import torch.package._mangling as package_mangling
46
+ from torch._awaits import _Await
47
+ from torch._C import _Await as CAwait, Future as CFuture
48
+ from torch._sources import fake_range, get_source_lines_and_file, parse_def
49
+ from torch.futures import Future
50
+
51
+ IS_PY39_PLUS: Final[bool] = sys.version_info >= (3, 9)
52
+ IS_PY310_PLUS: Final[bool] = sys.version_info >= (3, 10)
53
+
54
+ BuiltinUnionType: Union[Type, Tuple[Type, ...]]
55
+ if sys.version_info >= (3, 10):
56
+ # NOTE: IS_PY310_PLUS doesn't work with mypy.
57
+ # cf. https://mypy.readthedocs.io/en/stable/common_issues.html#python-version-and-system-platform-checks
58
+ BuiltinUnionType = types.UnionType
59
+ else:
60
+ BuiltinUnionType = () # trick: this makes isinstance short circuit.
61
+
62
+ LockType: Type
63
+ try:
64
+ import _thread
65
+
66
+ LockType = _thread.LockType
67
+ except ImportError:
68
+ import _dummy_thread # type: ignore[import-not-found]
69
+
70
+ LockType = _dummy_thread.LockType
71
+
72
+ # Wrapper functions that can call either of 2 functions depending on a boolean
73
+ # argument
74
+ boolean_dispatched: "weakref.WeakKeyDictionary[Callable, Dict[str, Callable]]" = (
75
+ weakref.WeakKeyDictionary()
76
+ ) # noqa: T484
77
+
78
+
79
+ FAKE_FILENAME_PREFIX = "__torch_jit_dataclass"
80
+
81
+
82
+ class SourceLoader:
83
+ def __init__(self):
84
+ self.content = {}
85
+
86
+ def cache(self, fn, source):
87
+ self.content[fn] = source
88
+
89
+ def get_source(self, fn):
90
+ return self.content.get(fn)
91
+
92
+
93
+ loader = SourceLoader()
94
+
95
+
96
+ def createResolutionCallbackFromEnv(lookup_base):
97
+ """
98
+ Creates a resolution callback that will look up qualified names in an
99
+ environment, starting with `lookup_base` for the base of any qualified
100
+ names, then proceeding down the lookup chain with the resolved object.
101
+
102
+ You should not use this directly, it should only be used from the other
103
+ createResolutionCallbackFrom* functions.
104
+ """
105
+
106
+ def lookupInModule(qualified_name, module):
107
+ if "." in qualified_name:
108
+ base, remaining_pieces = qualified_name.split(".", maxsplit=1)
109
+ module_value = getattr(module, base)
110
+ return lookupInModule(remaining_pieces, module_value)
111
+ else:
112
+ return getattr(module, qualified_name)
113
+
114
+ def parseNestedExpr(expr, module) -> Tuple[Any, int]:
115
+ i = 0
116
+ while i < len(expr) and expr[i] not in (",", "[", "]"):
117
+ i += 1
118
+
119
+ # Special case logic for the empty Tuple as a subscript (used
120
+ # in the type annotation `Tuple[()]`)
121
+ if expr[:i] == "()":
122
+ return (), i
123
+
124
+ base = lookupInModule(expr[:i].strip(), module)
125
+ assert base is not None, f"Unresolvable type {expr[:i]}"
126
+ if i == len(expr) or expr[i] != "[":
127
+ return base, i
128
+
129
+ assert expr[i] == "["
130
+ parts = []
131
+ while expr[i] != "]":
132
+ part_len = 0
133
+ i += 1
134
+ part, part_len = parseNestedExpr(expr[i:], module)
135
+ parts.append(part)
136
+ i += part_len
137
+ if len(parts) > 1:
138
+ return base[tuple(parts)], i + 1
139
+ else:
140
+ return base[parts[0]], i + 1
141
+
142
+ def parseExpr(expr, module):
143
+ try:
144
+ value, len_parsed = parseNestedExpr(expr, module)
145
+ assert len_parsed == len(
146
+ expr
147
+ ), "whole expression was not parsed, falling back to c++ parser"
148
+ return value
149
+ except Exception:
150
+ """
151
+ The python resolver fails in several cases in known unit tests, and is intended
152
+ to fall back gracefully to the c++ resolver in general. For example, python 2 style
153
+ annotations which are frequent in our unit tests often fail with types e.g. int not
154
+ resolvable from the calling frame.
155
+ """
156
+ return None
157
+
158
+ return lambda expr: parseExpr(expr, lookup_base)
159
+
160
+
161
+ def createResolutionCallbackFromFrame(frames_up: int = 0):
162
+ """
163
+ Creates a function which, given a string variable name,
164
+ returns the value of the variable in the scope of the caller of
165
+ the function which called createResolutionCallbackFromFrame (by default).
166
+
167
+ This is used to enable access in-scope Python variables inside
168
+ TorchScript fragments.
169
+
170
+ frames_up is number of additional frames to go up on the stack.
171
+ The default value is 0, which correspond to the frame of the caller
172
+ of createResolutionCallbackFromFrame. Also for example, if frames_up is set
173
+ to 1, then the frame of the caller's caller of createResolutionCallbackFromFrame
174
+ will be taken.
175
+
176
+ For example, the following program prints 2::
177
+
178
+ def bar():
179
+ cb = createResolutionCallbackFromFrame(1)
180
+ print(cb("foo"))
181
+
182
+ def baz():
183
+ foo = 2
184
+ bar()
185
+
186
+ baz()
187
+ """
188
+ frame = inspect.currentframe()
189
+ i = 0
190
+ while i < frames_up + 1:
191
+ assert frame is not None
192
+ frame = frame.f_back
193
+ i += 1
194
+
195
+ assert frame is not None
196
+ f_locals = frame.f_locals
197
+ f_globals = frame.f_globals
198
+
199
+ class env:
200
+ def __getattr__(self, key):
201
+ if key in f_locals:
202
+ return f_locals[key]
203
+ elif key in f_globals:
204
+ return f_globals[key]
205
+ elif key in dir(builtins):
206
+ return getattr(builtins, key)
207
+
208
+ return createResolutionCallbackFromEnv(env())
209
+
210
+
211
+ def get_closure(fn):
212
+ """
213
+ Get a dictionary of closed over variables from a function
214
+ """
215
+ captures = {}
216
+ captures.update(fn.__globals__)
217
+
218
+ for index, captured_name in enumerate(fn.__code__.co_freevars):
219
+ captures[captured_name] = fn.__closure__[index].cell_contents
220
+
221
+ return captures
222
+
223
+
224
+ # [local resolution in python]
225
+ # Depending on where a variable is defined, and where it is used, we may
226
+ # or may not be able to recover its value when recursively compiling a
227
+ # script function. Remember in the general case, a module or function is
228
+ # first defined and then later scripted. This means we do not have a
229
+ # chance to capture the active frames when the function is defined. Hence any
230
+ # name resolution has to happen later on the created closure. The way
231
+ # python captures type annotations restricts what we can recover. The
232
+ # follow example illustrates the different cases:
233
+ #
234
+ # class MyGlobalClass:
235
+ # ...
236
+ # def my_local_scope():
237
+ # @torch.jit.script
238
+ # class MyClass:
239
+ # ...
240
+ # @torch.jit.script
241
+ # class MyClassUsedAsVar:
242
+ # ...
243
+ # def eg(x: MyClass, y: MyGlobalClass):
244
+ # a_local_capture : Foo
245
+ # return MyClassUsedAsVar(x)
246
+ #
247
+ # MyGlobalClass is defined in the __globals__ dictionary of function
248
+ # 'eg', so it is always recoverable. my_local_scope introduces a new local
249
+ # variable scope in the function. Classes defined here are only visible as
250
+ # local variables. For the case of MyClassUsedAsVar, it is captured
251
+ # because it is used as a variable inside the body of the function, and we
252
+ # can resolve it using the captures returned from `get_closure`. However,
253
+ # the type annotations are not captured by the closure. In Python
254
+ # 3.0--3.9, the _value_ of MyClass and MyGlobalClass will be available as
255
+ # annotations on `eg``, but starting in Python 4.0, they will represented as
256
+ # strings and no longer present. Furthermore, since the body of `eg` does
257
+ # not reference those names, they do not appear in the list of closed over
258
+ # variables. In Python 2.x, type annotations are in comments, leading to a
259
+ # similar situation where their definitions are not available. We anticipate
260
+ # that most users will not run into this issue because their modules and
261
+ # functions will be defined at a global scope like MyGlobalClass. In cases
262
+ # where they are not, it is possible to work around issues by declaring the
263
+ # values global in the function.
264
+ # In Python 3.9 declaring class as global will make it invisible to
265
+ # `inspect.getsource`, see https://bugs.python.org/issue42666 .
266
+ # This could be worked around by manualy adding it to `global()` dictionary.
267
+
268
+
269
+ def createResolutionCallbackFromClosure(fn):
270
+ """
271
+ Create a resolutionCallback by introspecting the function instead of
272
+ looking up the stack for the enclosing scope
273
+ """
274
+ closure = get_closure(fn)
275
+
276
+ class closure_lookup:
277
+ # This is a class since `closure` is a dict and it's easier in
278
+ # `env_helper` if everything just works with `getattr` calls
279
+ def __getattr__(self, key):
280
+ if key in closure:
281
+ return closure[key]
282
+ elif hasattr(typing, key):
283
+ return getattr(typing, key)
284
+ elif hasattr(builtins, key):
285
+ return getattr(builtins, key)
286
+ return None
287
+
288
+ return createResolutionCallbackFromEnv(closure_lookup())
289
+
290
+
291
+ def can_compile_class(cls) -> bool:
292
+ # If any of the functions on a type don't have a code object, this type can't
293
+ # be compiled and is probably a builtin / bound from C
294
+ if is_ignored_fn(cls):
295
+ return False
296
+
297
+ # Ignore the following list of built-in classes.
298
+ ignored_builtin_classes = (torch.nn.Module, tuple, list, Exception)
299
+ if issubclass(cls, ignored_builtin_classes):
300
+ return False
301
+
302
+ names = cls.__dict__
303
+ fns = [
304
+ getattr(cls, name)
305
+ for name in names
306
+ if inspect.isroutine(getattr(cls, name, None))
307
+ ]
308
+ has_code = [hasattr(fn, "__code__") for fn in fns]
309
+ return all(has_code)
310
+
311
+
312
+ def get_callable_argument_names(fn) -> List[str]:
313
+ """
314
+ Gets names of all POSITIONAL_OR_KEYWORD arguments for callable `fn`.
315
+ Returns an empty list when other types of arguments are present.
316
+
317
+ This is used by `torch.jit.trace` to assign meaningful argument names to
318
+ traced functions and modules.
319
+
320
+ Args:
321
+ fn: A callable.
322
+ Returns:
323
+ Argument names: List[str]
324
+ """
325
+ # inspect.signature may fail, give up in that case.
326
+ try:
327
+ callable_signature = inspect.signature(fn)
328
+ except Exception:
329
+ return []
330
+
331
+ argument_names = []
332
+ for name, param in callable_signature.parameters.items():
333
+ # All four other types of arguments do not map to individual values
334
+ # with a keyword as name.
335
+ if not param.kind == param.POSITIONAL_OR_KEYWORD:
336
+ continue
337
+
338
+ argument_names.append(name)
339
+
340
+ return argument_names
341
+
342
+
343
+ def get_annotation_str(annotation):
344
+ """
345
+ Convert an AST node containing a type annotation to the string present in the source
346
+ that represents the same annotation.
347
+ """
348
+ if isinstance(annotation, ast.Name):
349
+ return annotation.id
350
+ elif isinstance(annotation, ast.Attribute):
351
+ return ".".join([get_annotation_str(annotation.value), annotation.attr])
352
+ elif isinstance(annotation, ast.Subscript):
353
+ # In Python3.9+ subscript indicies are not wrapped in ast.Index
354
+ subscript_slice = annotation.slice if IS_PY39_PLUS else annotation.slice.value # type: ignore[attr-defined]
355
+ return f"{get_annotation_str(annotation.value)}[{get_annotation_str(subscript_slice)}]"
356
+ elif isinstance(annotation, ast.Tuple):
357
+ return ",".join([get_annotation_str(elt) for elt in annotation.elts])
358
+ elif isinstance(annotation, (ast.Constant, ast.NameConstant)):
359
+ return f"{annotation.value}"
360
+
361
+ # If an AST node is not handled here, it's probably handled in ScriptTypeParser.
362
+ return None
363
+
364
+
365
+ def get_type_hint_captures(fn):
366
+ """
367
+ Get a dictionary containing type resolution mappings necessary to resolve types
368
+ for the literal annotations on 'fn'. These are not considered to be closed-over by fn
369
+ and must be obtained separately (e.g. using this function).
370
+
371
+ Args:
372
+ fn: A callable.
373
+ Returns:
374
+ A Dict[str, Any] containing a mapping from the literal annotations used on
375
+ fn to the Python objects they refer to.
376
+ """
377
+ # First, try to get the source of the function. We'll need to parse it to find the actual string names
378
+ # that were used to annotate the types, since inspect.signature() will only return the class object that
379
+ # the annotation refers to, not the string name. If we can't get the source, simply return an empty dict.
380
+ # This may happen in cases where the function is synthesized dynamically at runtime.
381
+ src = loader.get_source(fn)
382
+ if src is None:
383
+ src = inspect.getsource(fn)
384
+
385
+ # Gather a dictionary of parameter name -> type, skipping any parameters whose annotated
386
+ # types are strings. These are only understood by TorchScript in the context of a type annotation
387
+ # that refers to a class in its own definition, but trying to include a mapping for this in the result
388
+ # function would cause infinite recursion because the class is currently being compiled.
389
+ # In addition, there is logic in ScriptTypeParser to handle this.
390
+ signature = inspect.signature(fn)
391
+ name_to_type = {
392
+ name: parameter.annotation
393
+ for name, parameter in signature.parameters.items()
394
+ if parameter.annotation is not inspect.Parameter.empty
395
+ and not isinstance(parameter.annotation, str)
396
+ }
397
+
398
+ # Then, get the literal type annotations from the function declaration
399
+ # by source inspection. This accounts for the case in which aliases are used
400
+ # to annotate the arguments (e.g device_t = torch.device, and then d: device_t).
401
+ # frontend.py cannot be used here because it includes _jit_internal, so use ast instead.
402
+ a = ast.parse(dedent(src))
403
+ if len(a.body) != 1 or not isinstance(a.body[0], ast.FunctionDef):
404
+ raise RuntimeError(f"Expected {fn} to be a function")
405
+ f = a.body[0]
406
+
407
+ # Prepare a dictionary of source annotation -> type, which will be the final result of this function,
408
+ # by using the parsed AST (f) to reconstruct source annotations as strings for each parameter and mapping
409
+ # them to the type object corresponding to the annotation via name_to_type using the parameter name.
410
+ annotation_to_type = {}
411
+
412
+ for arg in f.args.args:
413
+ # Get the source type annotation string for this argument if possible.
414
+ arg_annotation_str = (
415
+ get_annotation_str(arg.annotation) if arg.annotation else None
416
+ )
417
+
418
+ # If the argument has no annotation or get_annotation_str cannot convert it to a string,
419
+ # arg_annotation_str will be None. Skip this arg; ScriptTypeParser will probably handle
420
+ # this in the latter case.
421
+ if arg_annotation_str is None:
422
+ continue
423
+
424
+ # Insert {arg_annotation_str: type} into annotation_to_type if possible. One reason arg_name may not
425
+ # be present in name_to_type is that the annotation itself is a string and not a type object
426
+ # (common for self-refential annotations in classes). Once again, let ScriptTypeParser handle this.
427
+ arg_name = arg.arg
428
+ if arg_name in name_to_type:
429
+ annotation_to_type[arg_annotation_str] = name_to_type[arg_name]
430
+
431
+ # If there is a valid return annotation, include it in annotation_to_type. As with argument annotations,
432
+ # the literal annotation has to be convertible to a string by get_annotation_str, and the actual type
433
+ # of the annotation cannot be a string.
434
+ literal_return_annotation = get_annotation_str(f.returns)
435
+ valid_literal_annotation = literal_return_annotation is not None
436
+ return_annotation = signature.return_annotation
437
+ valid_return_annotation_type = (
438
+ return_annotation is not inspect.Parameter.empty
439
+ and not isinstance(return_annotation, str)
440
+ )
441
+ if valid_literal_annotation and valid_return_annotation_type:
442
+ annotation_to_type[literal_return_annotation] = return_annotation
443
+
444
+ return annotation_to_type
445
+
446
+
447
+ def createResolutionCallbackForClassMethods(cls):
448
+ """
449
+ This looks at all the methods defined in a class and pulls their closed-over
450
+ variables into a dictionary and uses that to resolve variables.
451
+ """
452
+ # cls is a type here, so `ismethod` is false since the methods on the type
453
+ # aren't bound to anything, so Python treats them as regular functions
454
+ fns = [
455
+ getattr(cls, name)
456
+ for name in cls.__dict__
457
+ if inspect.isroutine(getattr(cls, name))
458
+ ]
459
+ # Skip built-ins, as they do not have global scope nor type hints
460
+ # Needed to support `enum.Enum` derived classes in Python-3.11
461
+ # That adds `_new_member_` property which is an alias to `__new__`
462
+ fns = [fn for fn in fns if not inspect.isbuiltin(fn) and hasattr(fn, "__globals__")]
463
+ captures = {}
464
+
465
+ for fn in fns:
466
+ captures.update(get_closure(fn))
467
+ captures.update(get_type_hint_captures(fn))
468
+
469
+ def lookup_in_class(key):
470
+ if key in captures:
471
+ return captures[key]
472
+ else:
473
+ return getattr(builtins, key, None)
474
+
475
+ return lookup_in_class
476
+
477
+
478
+ def boolean_dispatch(
479
+ arg_name, arg_index, default, if_true, if_false, module_name, func_name
480
+ ):
481
+ """
482
+ Dispatches to either of 2 script functions based on a boolean argument.
483
+ In TorchScript, the boolean argument must be constant so that the correct
484
+ function to use can be determined at compile time.
485
+ """
486
+
487
+ def fn(*args, **kwargs):
488
+ dispatch_flag = default
489
+ if arg_name in kwargs:
490
+ dispatch_flag = kwargs[arg_name]
491
+ elif arg_index < len(args):
492
+ dispatch_flag = args[arg_index]
493
+
494
+ if dispatch_flag:
495
+ return if_true(*args, **kwargs)
496
+ else:
497
+ return if_false(*args, **kwargs)
498
+
499
+ if if_true.__doc__ is None and if_false.__doc__ is not None:
500
+ doc = if_false.__doc__
501
+ if_true.__doc__ = doc
502
+ elif if_false.__doc__ is None and if_true.__doc__ is not None:
503
+ doc = if_true.__doc__
504
+ if_false.__doc__ = doc
505
+ elif if_false.__doc__ is None and if_true.__doc__ is None:
506
+ # neither function has a docstring
507
+ doc = None
508
+ else:
509
+ raise RuntimeError("only one function can have a docstring")
510
+ fn.__doc__ = doc
511
+
512
+ if module_name is not None:
513
+ fn.__module__ = module_name
514
+ if func_name is not None:
515
+ fn.__name__ = func_name
516
+
517
+ boolean_dispatched[fn] = {
518
+ "if_true": if_true,
519
+ "if_false": if_false,
520
+ "index": arg_index,
521
+ "default": default,
522
+ "arg_name": arg_name,
523
+ }
524
+ return fn
525
+
526
+
527
+ class FunctionModifiers:
528
+ """
529
+ Used to denote the behavior of a function in TorchScript. See export() and
530
+ ignore() for details.
531
+ """
532
+
533
+ UNUSED = "unused (ignored and replaced with raising of an exception)"
534
+ IGNORE = "ignore (leave as a call to Python, cannot be torch.jit.save'd)"
535
+ EXPORT = "export (compile this function even if nothing calls it)"
536
+ DEFAULT = "default (compile if called from a exported function / forward)"
537
+ COPY_TO_SCRIPT_WRAPPER = (
538
+ "if this method is not scripted, copy the python method onto the scripted model"
539
+ )
540
+ _DROP = "_drop (function is fully ignored, declaration can be unscriptable)"
541
+
542
+
543
+ def export(fn):
544
+ """
545
+ This decorator indicates that a method on an ``nn.Module`` is used as an entry point into a
546
+ :class:`ScriptModule` and should be compiled.
547
+
548
+ ``forward`` implicitly is assumed to be an entry point, so it does not need this decorator.
549
+ Functions and methods called from ``forward`` are compiled as they are seen
550
+ by the compiler, so they do not need this decorator either.
551
+
552
+ Example (using ``@torch.jit.export`` on a method):
553
+
554
+ .. testcode::
555
+
556
+ import torch
557
+ import torch.nn as nn
558
+
559
+ class MyModule(nn.Module):
560
+ def implicitly_compiled_method(self, x):
561
+ return x + 99
562
+
563
+ # `forward` is implicitly decorated with `@torch.jit.export`,
564
+ # so adding it here would have no effect
565
+ def forward(self, x):
566
+ return x + 10
567
+
568
+ @torch.jit.export
569
+ def another_forward(self, x):
570
+ # When the compiler sees this call, it will compile
571
+ # `implicitly_compiled_method`
572
+ return self.implicitly_compiled_method(x)
573
+
574
+ def unused_method(self, x):
575
+ return x - 20
576
+
577
+ # `m` will contain compiled methods:
578
+ # `forward`
579
+ # `another_forward`
580
+ # `implicitly_compiled_method`
581
+ # `unused_method` will not be compiled since it was not called from
582
+ # any compiled methods and wasn't decorated with `@torch.jit.export`
583
+ m = torch.jit.script(MyModule())
584
+ """
585
+ fn._torchscript_modifier = FunctionModifiers.EXPORT
586
+ return fn
587
+
588
+
589
+ def unused(fn):
590
+ """
591
+ This decorator indicates to the compiler that a function or method should
592
+ be ignored and replaced with the raising of an exception. This allows you
593
+ to leave code in your model that is not yet TorchScript compatible and still
594
+ export your model.
595
+
596
+ Example (using ``@torch.jit.unused`` on a method)::
597
+
598
+ import torch
599
+ import torch.nn as nn
600
+
601
+ class MyModule(nn.Module):
602
+ def __init__(self, use_memory_efficient):
603
+ super().__init__()
604
+ self.use_memory_efficient = use_memory_efficient
605
+
606
+ @torch.jit.unused
607
+ def memory_efficient(self, x):
608
+ import pdb
609
+ pdb.set_trace()
610
+ return x + 10
611
+
612
+ def forward(self, x):
613
+ # Use not-yet-scriptable memory efficient mode
614
+ if self.use_memory_efficient:
615
+ return self.memory_efficient(x)
616
+ else:
617
+ return x + 10
618
+
619
+ m = torch.jit.script(MyModule(use_memory_efficient=False))
620
+ m.save("m.pt")
621
+
622
+ m = torch.jit.script(MyModule(use_memory_efficient=True))
623
+ # exception raised
624
+ m(torch.rand(100))
625
+ """
626
+ if isinstance(fn, property):
627
+ prop = fn
628
+ setattr( # noqa: B010
629
+ prop.fget, "_torchscript_modifier", FunctionModifiers.UNUSED
630
+ )
631
+
632
+ if prop.fset:
633
+ setattr( # noqa: B010
634
+ prop.fset, "_torchscript_modifier", FunctionModifiers.UNUSED
635
+ )
636
+
637
+ return prop
638
+
639
+ fn._torchscript_modifier = FunctionModifiers.UNUSED
640
+ return fn
641
+
642
+
643
+ # No op context manager from python side
644
+ class _IgnoreContextManager(contextlib.AbstractContextManager):
645
+ def __init__(self, **kwargs):
646
+ pass
647
+
648
+ def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
649
+ pass
650
+
651
+
652
+ def ignore(drop=False, **kwargs):
653
+ """
654
+ This decorator indicates to the compiler that a function or method should
655
+ be ignored and left as a Python function. This allows you to leave code in
656
+ your model that is not yet TorchScript compatible. If called from TorchScript,
657
+ ignored functions will dispatch the call to the Python interpreter. Models with ignored
658
+ functions cannot be exported; use :func:`@torch.jit.unused <torch.jit.unused>` instead.
659
+
660
+ Example (using ``@torch.jit.ignore`` on a method)::
661
+
662
+ import torch
663
+ import torch.nn as nn
664
+
665
+ class MyModule(nn.Module):
666
+ @torch.jit.ignore
667
+ def debugger(self, x):
668
+ import pdb
669
+ pdb.set_trace()
670
+
671
+ def forward(self, x):
672
+ x += 10
673
+ # The compiler would normally try to compile `debugger`,
674
+ # but since it is `@ignore`d, it will be left as a call
675
+ # to Python
676
+ self.debugger(x)
677
+ return x
678
+
679
+ m = torch.jit.script(MyModule())
680
+
681
+ # Error! The call `debugger` cannot be saved since it calls into Python
682
+ m.save("m.pt")
683
+
684
+ Example (using ``@torch.jit.ignore(drop=True)`` on a method):
685
+
686
+ .. testcode::
687
+
688
+ import torch
689
+ import torch.nn as nn
690
+
691
+ class MyModule(nn.Module):
692
+ @torch.jit.ignore(drop=True)
693
+ def training_method(self, x):
694
+ import pdb
695
+ pdb.set_trace()
696
+
697
+ def forward(self, x):
698
+ if self.training:
699
+ self.training_method(x)
700
+ return x
701
+
702
+ m = torch.jit.script(MyModule())
703
+
704
+ # This is OK since `training_method` is not saved, the call is replaced
705
+ # with a `raise`.
706
+ m.save("m.pt")
707
+
708
+ .. testcleanup::
709
+
710
+ import os
711
+ os.remove('m.pt')
712
+ """
713
+
714
+ if callable(drop):
715
+ # used without any args, so drop is actually a function
716
+ # @torch.jit.ignore
717
+ # def fn(...):
718
+ fn = drop
719
+ fn._torchscript_modifier = FunctionModifiers.IGNORE
720
+ return fn
721
+
722
+ if not isinstance(drop, bool):
723
+ raise RuntimeError(
724
+ "Argument to @torch.jit.ignore must be a bool or "
725
+ f"a function but got {drop}"
726
+ )
727
+
728
+ # for backwards compat
729
+ drop_on_export = kwargs.pop("drop_on_export", None)
730
+ if drop_on_export:
731
+ warnings.warn(
732
+ "ignore(drop_on_export=True) has been deprecated. TorchScript will now drop the function "
733
+ "call on compilation. Use torch.jit.unused now. {}",
734
+ category=FutureWarning,
735
+ )
736
+
737
+ drop = drop_on_export
738
+ elif drop:
739
+ warnings.warn(
740
+ "ignore(True) has been deprecated. TorchScript will now drop the function "
741
+ "call on compilation. Use torch.jit.unused now. {}",
742
+ category=FutureWarning,
743
+ )
744
+
745
+ def decorator(fn):
746
+ if drop:
747
+ fn._torchscript_modifier = FunctionModifiers.UNUSED
748
+ else:
749
+ fn._torchscript_modifier = FunctionModifiers.IGNORE
750
+ return fn
751
+
752
+ return decorator
753
+
754
+
755
+ def _drop(fn):
756
+ fn._torchscript_modifier = FunctionModifiers._DROP
757
+ return fn
758
+
759
+
760
+ def _copy_to_script_wrapper(fn):
761
+ fn._torchscript_modifier = FunctionModifiers.COPY_TO_SCRIPT_WRAPPER
762
+ return fn
763
+
764
+
765
+ def module_has_exports(mod):
766
+ for name in dir(mod):
767
+ if hasattr(mod, name):
768
+ item = getattr(mod, name)
769
+ if callable(item):
770
+ if get_torchscript_modifier(item) is FunctionModifiers.EXPORT:
771
+ return True
772
+ return False
773
+
774
+
775
+ # WARNING: should_drop is currently being used by our JIT code coverage plug-in to mark JIT'd code as covered. If you
776
+ # rename this function, please update references in tools/coverage_plugins_package/src/coverage_plugins/jit_plugin.py to
777
+ # allow JIT'd code to still be covered.
778
+ def should_drop(fn) -> bool:
779
+ attr = get_torchscript_modifier(fn)
780
+ if attr is None:
781
+ return False
782
+ return attr is FunctionModifiers.UNUSED or attr is FunctionModifiers._DROP
783
+
784
+
785
+ def is_ignored_fn(fn) -> bool:
786
+ mod = get_torchscript_modifier(fn)
787
+ return (
788
+ mod is FunctionModifiers.UNUSED
789
+ or mod is FunctionModifiers.IGNORE
790
+ or mod is FunctionModifiers._DROP
791
+ )
792
+
793
+
794
+ def _is_drop_fn(fn) -> bool:
795
+ mod = get_torchscript_modifier(fn)
796
+ return mod is FunctionModifiers._DROP
797
+
798
+
799
+ def is_static_fn(cls, fn) -> bool:
800
+ return isinstance(inspect.getattr_static(cls, fn, default=None), staticmethod)
801
+
802
+
803
+ def get_static_fn(cls, fn):
804
+ return inspect.getattr_static(cls, fn).__func__
805
+
806
+
807
+ def get_torchscript_modifier(fn):
808
+ if not callable(fn):
809
+ return None
810
+ if hasattr(fn, "__func__"):
811
+ fn = fn.__func__
812
+ return getattr(fn, "_torchscript_modifier", FunctionModifiers.DEFAULT)
813
+
814
+
815
+ def copy_torchscript_modifier(orig, new) -> None:
816
+ attr = get_torchscript_modifier(orig)
817
+ if attr is None:
818
+ return
819
+ new._torchscript_modifier = attr
820
+
821
+
822
+ # overloading registration
823
+ # overloads get registered in this file, and compiled in torch/jit/__init__.py
824
+ # so that they can be imported in nn/functional.py without an import cycle
825
+
826
+ # qualified_name => list[overload_functions]
827
+ _overloaded_fns: Dict[str, List[Callable]] = {} # noqa: T484
828
+
829
+
830
+ _OVERLOAD_EXAMPLE = """
831
+ Example usage of overload function:
832
+ @torch.jit._overload
833
+ def my_function(x: type0) -> type0: # decl 1
834
+ pass
835
+
836
+ @torch.jit._overload
837
+ def my_function(x: type1) -> type1: # decl 2
838
+ pass
839
+
840
+ def my_function(x): # implementation
841
+ if isinstance(x, type0):
842
+ return x
843
+ elif isinstance(x, type1):
844
+ return x
845
+ """
846
+
847
+
848
+ def get_overload_no_implementation_error_message(kind, obj):
849
+ sourcelines, file_lineno, filename = get_source_lines_and_file(obj)
850
+ return (
851
+ f'Implementation for the {kind} "{_qualified_name(obj)}" is missing. Please make '
852
+ f"sure a definition is provided and defined after all overload declarations.\n"
853
+ f'File "{filename}", line {file_lineno}:\n'
854
+ + "".join(sourcelines)
855
+ + "\n"
856
+ + _OVERLOAD_EXAMPLE
857
+ )
858
+
859
+
860
+ def _check_overload_body(func):
861
+ try:
862
+ parsed_def = parse_def(func)
863
+ except OSError as e:
864
+ # Parsing the function definition can raise an OSError if source is unavailable.
865
+ # Since this is just an initial check, just raise a warning if this is the case.
866
+ warnings.warn(
867
+ f"Unable to retrieve source for @torch.jit._overload function: {func}."
868
+ )
869
+ return
870
+
871
+ body = parsed_def.ast.body[0].body
872
+
873
+ def is_pass(x):
874
+ return isinstance(x, ast.Pass)
875
+
876
+ def is_ellipsis(x):
877
+ return isinstance(x, ast.Expr) and isinstance(x.value, ast.Ellipsis)
878
+
879
+ if len(body) != 1 or not (is_pass(body[0]) or is_ellipsis(body[0])):
880
+ msg = (
881
+ "Only `pass` statement or `...` can be the body of overload declaration:\n"
882
+ )
883
+ msg += "\n".join(parsed_def.source.split("\n")[:3])
884
+ msg += " <- Expecting `pass` or `...` here!\n" + _OVERLOAD_EXAMPLE
885
+ raise RuntimeError(msg)
886
+
887
+
888
+ def _overload(func):
889
+ _check_overload_body(func)
890
+ qual_name = _qualified_name(func)
891
+ global _overloaded_fns
892
+ fn_overload_list = _overloaded_fns.get(qual_name)
893
+ if fn_overload_list is None:
894
+ fn_overload_list = []
895
+ _overloaded_fns[qual_name] = fn_overload_list
896
+ fn_overload_list.append(func)
897
+ return func
898
+
899
+
900
+ def _get_fn_overloads(qual_name):
901
+ return _overloaded_fns.get(qual_name)
902
+
903
+
904
+ def _clear_fn_overloads(qual_name) -> None:
905
+ del _overloaded_fns[qual_name]
906
+
907
+
908
+ def get_class_name_lineno(method) -> Tuple[str, int]:
909
+ current_frame = inspect.currentframe()
910
+
911
+ # one for the get_class_name call, one for _overload_method call
912
+ for i in range(2):
913
+ assert (
914
+ current_frame is not None
915
+ ) # assert current frame is not an Optional[FrameType]
916
+ current_frame = current_frame.f_back
917
+
918
+ assert current_frame is not None # same here
919
+ class_name = current_frame.f_code.co_name
920
+ line_no = current_frame.f_code.co_firstlineno
921
+ return class_name, line_no
922
+
923
+
924
+ # At the point the decorator is applied to class methods the method
925
+ # has no reference to its owning class. _qualified_name would not include
926
+ # the class it is defined in, so any methods with the same name in the same file
927
+ # would have the same _qualified_name, even if they were defined in different
928
+ # classes. This problem only exists in python 2.
929
+ # We get around this problem by looking at the stack frame and identifying
930
+ # the class name, and throwing an error whenever overloads are used
931
+ # when modules of the same name are in the same file
932
+
933
+ # qualified_name => class name => list[overload_functions]
934
+ _overloaded_methods: Dict[str, Dict[str, List[Callable]]] = {} # noqa: T484
935
+
936
+
937
+ # (qualified_name, class name) => class_fileno
938
+ _overloaded_method_class_fileno: Dict[Tuple[str, str], int] = {}
939
+
940
+
941
+ def _overload_method(func):
942
+ _check_overload_body(func)
943
+ qual_name = _qualified_name(func)
944
+ global _overloaded_methods
945
+ class_name_map = _overloaded_methods.get(qual_name, None)
946
+ if class_name_map is None:
947
+ class_name_map = {}
948
+ _overloaded_methods[qual_name] = class_name_map
949
+
950
+ class_name, line_no = get_class_name_lineno(func)
951
+ method_overloads = class_name_map.get(class_name, None)
952
+ if method_overloads is None:
953
+ method_overloads = []
954
+ class_name_map[class_name] = method_overloads
955
+ _overloaded_method_class_fileno[(qual_name, class_name)] = line_no
956
+ else:
957
+ existing_lineno = _overloaded_method_class_fileno[(qual_name, class_name)]
958
+ if existing_lineno != line_no:
959
+ raise RuntimeError(
960
+ "Cannot currently overload the same method name in two different"
961
+ " classes with the same name in the same module"
962
+ )
963
+
964
+ method_overloads.append(func)
965
+ return func
966
+
967
+
968
+ def _get_overloaded_methods(method, mod_class):
969
+ # TODO: __name__ not set for submodules in recursive script
970
+ if not hasattr(method, "__name__"):
971
+ return None
972
+ qual_name = _qualified_name(method)
973
+ class_name_map = _overloaded_methods.get(qual_name, None)
974
+ if class_name_map is None:
975
+ return None
976
+ overloads = class_name_map.get(mod_class.__name__, None)
977
+ if overloads is None:
978
+ return None
979
+
980
+ method_line_no = get_source_lines_and_file(method)[1]
981
+ mod_class_fileno = get_source_lines_and_file(mod_class)[1]
982
+ mod_end_fileno = mod_class_fileno + len(get_source_lines_and_file(mod_class)[0])
983
+ if not (method_line_no >= mod_class_fileno and method_line_no <= mod_end_fileno):
984
+ raise Exception(
985
+ "Overloads are not useable when a module is redeclared within the same file: "
986
+ + str(method)
987
+ )
988
+ return overloads
989
+
990
+
991
+ def is_tuple(ann) -> bool:
992
+ if ann is Tuple:
993
+ raise_error_container_parameter_missing("Tuple")
994
+
995
+ # For some reason Python 3.7 violates the Type[A, B].__origin__ == Type rule
996
+ if not hasattr(ann, "__module__"):
997
+ return False
998
+
999
+ ann_origin = get_origin(ann)
1000
+ if IS_PY39_PLUS and ann.__module__ == "builtins" and ann_origin is tuple:
1001
+ return True
1002
+ return ann.__module__ == "typing" and (ann_origin is Tuple or ann_origin is tuple)
1003
+
1004
+
1005
+ def is_list(ann) -> bool:
1006
+ if ann is List:
1007
+ raise_error_container_parameter_missing("List")
1008
+
1009
+ if not hasattr(ann, "__module__"):
1010
+ return False
1011
+
1012
+ ann_origin = get_origin(ann)
1013
+ if IS_PY39_PLUS and ann.__module__ == "builtins" and ann_origin is list:
1014
+ return True
1015
+ return ann.__module__ == "typing" and (ann_origin is List or ann_origin is list)
1016
+
1017
+
1018
+ def is_dict(ann) -> bool:
1019
+ if ann is Dict:
1020
+ raise_error_container_parameter_missing("Dict")
1021
+
1022
+ if not hasattr(ann, "__module__"):
1023
+ return False
1024
+
1025
+ ann_origin = get_origin(ann)
1026
+ if IS_PY39_PLUS and ann.__module__ == "builtins" and ann_origin is dict:
1027
+ return True
1028
+ return ann.__module__ == "typing" and (ann_origin is Dict or ann_origin is dict)
1029
+
1030
+
1031
+ def is_union(ann):
1032
+ if ann is Union:
1033
+ raise_error_container_parameter_missing("Union")
1034
+
1035
+ return isinstance(ann, BuiltinUnionType) or (
1036
+ hasattr(ann, "__module__")
1037
+ and ann.__module__ == "typing"
1038
+ and (get_origin(ann) is Union)
1039
+ )
1040
+
1041
+
1042
+ def is_optional(ann):
1043
+ if ann is Optional:
1044
+ raise_error_container_parameter_missing("Optional")
1045
+
1046
+ def is_optional_as_optional(ann):
1047
+ return (
1048
+ hasattr(ann, "__module__")
1049
+ and ann.__module__ == "typing"
1050
+ and (get_origin(ann) is Optional)
1051
+ )
1052
+
1053
+ def is_union_as_optional(ann):
1054
+ ann_args = get_args(ann)
1055
+ return len(ann_args) == 2 and (None in ann_args or type(None) in ann_args)
1056
+
1057
+ return is_optional_as_optional(ann) or (is_union(ann) and is_union_as_optional(ann))
1058
+
1059
+
1060
+ def is_future(ann) -> bool:
1061
+ if ann is Future:
1062
+ raise RuntimeError(
1063
+ "Attempted to use Future without a "
1064
+ "contained type. Please add a contained type, e.g. "
1065
+ "Future[int]"
1066
+ )
1067
+ return get_origin(ann) is Future
1068
+
1069
+
1070
+ def is_await(ann) -> bool:
1071
+ if ann is _Await:
1072
+ return True
1073
+ return get_origin(ann) is _Await
1074
+
1075
+
1076
+ if torch.distributed.rpc.is_available():
1077
+ from torch._C._distributed_rpc import PyRRef
1078
+ from torch.distributed.rpc import RRef
1079
+
1080
+ def is_rref(ann) -> bool:
1081
+ if ann is RRef:
1082
+ raise RuntimeError(
1083
+ "Attempted to use RRef without a "
1084
+ "contained type. Please add a contained type, e.g. "
1085
+ "RRef[int]"
1086
+ )
1087
+ return get_origin(ann) is RRef
1088
+
1089
+ def is_rref_instance(obj) -> bool:
1090
+ return isinstance(obj, PyRRef)
1091
+
1092
+ else:
1093
+
1094
+ def is_rref_instance(obj) -> bool:
1095
+ # If the RPC module doesn't exist then RRefs don't exist either.
1096
+ return False
1097
+
1098
+
1099
+ def is_final(ann) -> bool:
1100
+ return (
1101
+ hasattr(ann, "__module__")
1102
+ and ann.__module__ in {"typing", "typing_extensions"}
1103
+ and (get_origin(ann) is Final or isinstance(ann, type(Final)))
1104
+ )
1105
+
1106
+
1107
+ # allows BroadcastingList instance to be subscriptable
1108
+ class BroadcastingListCls:
1109
+ def __getitem__(self, types):
1110
+ return
1111
+
1112
+
1113
+ # mypy doesn't support parameters on types, so we have to explicitly type each
1114
+ # list size
1115
+ BroadcastingList1 = BroadcastingListCls()
1116
+ for i in range(2, 7):
1117
+ globals()[f"BroadcastingList{i}"] = BroadcastingList1
1118
+
1119
+
1120
+ def is_scripting() -> bool:
1121
+ r"""
1122
+ Function that returns True when in compilation and False otherwise. This
1123
+ is useful especially with the @unused decorator to leave code in your
1124
+ model that is not yet TorchScript compatible.
1125
+ .. testcode::
1126
+
1127
+ import torch
1128
+
1129
+ @torch.jit.unused
1130
+ def unsupported_linear_op(x):
1131
+ return x
1132
+
1133
+ def linear(x):
1134
+ if torch.jit.is_scripting():
1135
+ return torch.linear(x)
1136
+ else:
1137
+ return unsupported_linear_op(x)
1138
+ """
1139
+ return False
1140
+
1141
+
1142
+ # Retrieves a fully-qualified name (module hierarchy + classname) for a given obj.
1143
+ def _qualified_name(obj, mangle_name=True) -> str:
1144
+ # This special case allows us to override the qualified name on a type.
1145
+ # It's currently used in conjunction with tracing, where we create a
1146
+ # fake module to filter only supported attributes. However, since this
1147
+ # new type is defined as a local class, we need a mechanism to override
1148
+ # its qualname so it appears correctly in the TorchScript system. Thus,
1149
+ # we set '_jit_override_qualname' with the original traced module's
1150
+ # qualified name, which is picked up here
1151
+ if hasattr(obj, "_jit_override_qualname"):
1152
+ return obj._jit_override_qualname
1153
+ # short-circuit in cases where the object already has a known qualified name
1154
+ if isinstance(obj, torch._C.ScriptFunction):
1155
+ return obj.qualified_name
1156
+
1157
+ if getattr(obj, "__name__", None):
1158
+ name = obj.__name__
1159
+ # Enum classes do not have `__name__` attr, instead they have `name`.
1160
+ elif isinstance(obj, enum.Enum):
1161
+ name = obj.name
1162
+ else:
1163
+ raise RuntimeError("Could not get name of python class object")
1164
+
1165
+ if name == "<lambda>":
1166
+ name = "_lambda" # make name a valid identifier
1167
+
1168
+ module_name = obj.__module__
1169
+
1170
+ # If the module is actually a torchbind module, then we should short circuit
1171
+ if module_name == "torch._classes":
1172
+ return obj.qualified_name
1173
+
1174
+ # The Python docs are very clear that `__module__` can be None, but I can't
1175
+ # figure out when it actually would be.
1176
+ if module_name is None:
1177
+ raise RuntimeError(
1178
+ f"Could not get qualified name for class '{name}': "
1179
+ "__module__ can't be None."
1180
+ )
1181
+
1182
+ # if getattr(sys.modules[module_name], name) is not obj:
1183
+ # raise RuntimeError(f"Could not get qualified name for class '{name}': "
1184
+ # f"the attr {name} on module {module_name} is not the class")
1185
+
1186
+ # torch.package and TorchScript have separate mangling schemes to avoid
1187
+ # name collisions from multiple packages. To avoid them interfering with
1188
+ # each other, normalize the package mangling here.
1189
+ if package_mangling.is_mangled(module_name):
1190
+ module_name = module_name.replace("<", "_")
1191
+ module_name = module_name.replace(">", "_")
1192
+
1193
+ # The PythonExceptionValue C++ class in torch/csrc/jit/python/python_sugared_value.h
1194
+ # does not need to mangle the python class name.
1195
+ if mangle_name:
1196
+ # __main__ is a builtin module, so rewrite it to "__torch__".
1197
+ if module_name == "__main__":
1198
+ module_name = "__torch__"
1199
+ else:
1200
+ # Everything else gets a "__torch__" prefix to avoid name collisions
1201
+ # with the names of user values.
1202
+ module_name = "__torch__." + module_name
1203
+
1204
+ if "." in name:
1205
+ raise RuntimeError(
1206
+ f"Could not get qualified name for class '{name}': "
1207
+ f"'{name}' is not a valid identifier"
1208
+ )
1209
+
1210
+ return module_name + "." + name
1211
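As a rough illustration of the mangling rules above (purely for this review, not part of the file), the helper below restates how a module/class pair maps to a `__torch__`-prefixed qualified name; `sketch_qualified_name`, `my_pkg.layers`, and `MyBlock` are made-up names.

    def sketch_qualified_name(module_name: str, name: str) -> str:
        # Simplified restatement of the mangling above (illustrative only).
        if module_name == "__main__":
            module_name = "__torch__"
        else:
            module_name = "__torch__." + module_name
        return module_name + "." + name

    assert sketch_qualified_name("my_pkg.layers", "MyBlock") == "__torch__.my_pkg.layers.MyBlock"
    assert sketch_qualified_name("__main__", "MyBlock") == "__torch__.MyBlock"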
+
1212
+
1213
+ def _try_get_dispatched_fn(fn):
1214
+ if not callable(fn):
1215
+ return None
1216
+ return boolean_dispatched.get(fn)
1217
+
1218
+
1219
+ def _get_named_tuple_properties(
1220
+ obj, loc: Optional[torch._C._jit_tree_views.SourceRange] = None, rcb=None
1221
+ ):
1222
+ if loc is None:
1223
+ loc = fake_range()
1224
+
1225
+ assert issubclass(obj, tuple) and hasattr(obj, "_fields")
1226
+ if hasattr(obj, "_field_defaults"):
1227
+ defaults = [
1228
+ obj._field_defaults[field]
1229
+ for field in obj._fields
1230
+ if field in obj._field_defaults
1231
+ ]
1232
+ else:
1233
+ defaults = []
1234
+ # In 3.10 the recommended way to get annotations is to call the `inspect.get_annotations` function
1235
+ # Also, annotations from base class are not inherited so they need to be queried explicitly
1236
+ if sys.version_info[:2] < (3, 10):
1237
+ obj_annotations = getattr(obj, "__annotations__", {})
1238
+ else:
1239
+ obj_annotations = inspect.get_annotations(obj)
1240
+ if len(obj_annotations) == 0 and hasattr(obj, "__base__"):
1241
+ obj_annotations = inspect.get_annotations(obj.__base__)
1242
+
1243
+ annotations = []
1244
+ for field in obj._fields:
1245
+ if field in obj_annotations:
1246
+ field_type = obj_annotations[field]
1247
+ # [Note: ForwardRef annotations in NamedTuple attributes]
1248
+ # NamedTuple types are slightly different from normal types.
1249
+ #
1250
+ # Normally, annotations are evaluated like this (during jit.script):
1251
+ # 1. Load strings of python code into c++ and parse.
1252
+ # 2. Get annotations as strings
1253
+ # 3. Use the PythonResolver's resolution callback (rcb) to convert
1254
+ # the string into a python object
1255
+ # 4. We call into annotations.py:ann_to_type to convert python obj
1256
+ # from step 3 into a type that torchscript understands.
1257
+ #
1258
+ # NamedTuples are more complicated, because it has sub-types.
1259
+ # Normally, once we have the NamedTuple type object from #3,
1260
+ # we can just look at the annotation literal values and use
1261
+ # ann_to_type directly on them.
1262
+ #
1263
+ # But sometimes, users will annotate with string literals, e.g.
1264
+ # x: 'int'
1265
+ # This also happens with PEP563 (from __future__ import annotations)
1266
+ #
1267
+ # These annotations appear in the annotation dict as ForwardRef('int').
1268
+ #
1269
+ # Then, we need to convert the string into a python object. This
1270
+ # requires having local context for custom objects or imported types.
1271
+ # rcb() is what gives us this. So, we plumb rcb through the stack so
1272
+ # it can be used in this context for the if block below.
1273
+ #
1274
+ # FAQ:
1275
+ # - Why do we need this special handling for NamedTuple but string
1276
+ # annotations work fine for normal types? Normally, we parse the
1277
+ # string directly and then call rcb() directly from C++.
1278
+ # - Why not use ForwardRef._evaluate? For that, we need globals()
1279
+ # and locals() for the local context where the NamedTuple was defined.
1280
+ # rcb is what lets us look up into these. So, basically rcb does the
1281
+ # hard work for us.
1282
+ if isinstance(field_type, ForwardRef) and rcb is not None:
1283
+ rcb_type = rcb(field_type.__forward_arg__)
1284
+ # rcb returns None if it can't find anything.
1285
+ if rcb_type is None:
1286
+ raise ValueError(
1287
+ f"Unknown type annotation: '{field_type}' in NamedTuple {obj.__name__}."
1288
+ f" Likely due to partial support for ForwardRef parameters in NamedTuples, see #95858."
1289
+ f" Issue occurred at {loc.highlight()}"
1290
+ )
1291
+ field_type = rcb_type
1292
+ the_type = torch.jit.annotations.ann_to_type(field_type, loc, rcb)
1293
+ annotations.append(the_type)
1294
+ else:
1295
+ annotations.append(torch._C.TensorType.getInferred())
1296
+ return type(obj).__name__, obj._fields, annotations, defaults
1297
+
1298
+
1299
+ def _create_named_tuple(
1300
+ t, unqual_name: str, field_names: List[str], defaults: Tuple[Any, ...]
1301
+ ):
1302
+ TupleType = collections.namedtuple(unqual_name, field_names, defaults=defaults) # type: ignore[call-arg, no-redef, misc]
1303
+ return TupleType(*t)
1304
+
1305
+
1306
+ @contextlib.contextmanager
1307
+ def _disable_emit_hooks():
1308
+ hooks = torch._C._jit_get_emit_hooks()
1309
+ torch._C._jit_set_emit_hooks(None, None)
1310
+ try:
1311
+ yield
1312
+ finally:
1313
+ torch._C._jit_set_emit_hooks(hooks[0], hooks[1])
1314
+
1315
+
1316
+ def _disable_emit_hooks_decorator(_DecoratorContextManager) -> None: # noqa: F811
1317
+ def __enter__(self) -> None:
1318
+ self.hooks = torch._C._jit_get_emit_hooks()
1319
+ torch._C._jit_set_emit_hooks(None, None)
1320
+
1321
+ def __exit__(self, *args) -> None:
1322
+ torch._C._jit_set_emit_hooks(self.hooks[0], self.hooks[1])
1323
+
1324
+
1325
+ def _is_exception(obj) -> bool:
1326
+ if not inspect.isclass(obj):
1327
+ return False
1328
+ return issubclass(obj, Exception)
1329
+
1330
+
1331
+ def raise_error_container_parameter_missing(target_type) -> None:
1332
+ if target_type == "Dict":
1333
+ raise RuntimeError(
1334
+ "Attempted to use Dict without "
1335
+ "contained types. Please add contained type, e.g. "
1336
+ "Dict[int, int]"
1337
+ )
1338
+ raise RuntimeError(
1339
+ f"Attempted to use {target_type} without a "
1340
+ "contained type. Please add a contained type, e.g. "
1341
+ f"{target_type}[int]"
1342
+ )
1343
+
1344
+
1345
+ def check_args_exist(target_type) -> None:
1346
+ if target_type is List or target_type is list:
1347
+ raise_error_container_parameter_missing("List")
1348
+ elif target_type is Tuple or target_type is tuple:
1349
+ raise_error_container_parameter_missing("Tuple")
1350
+ elif target_type is Dict or target_type is dict:
1351
+ raise_error_container_parameter_missing("Dict")
1352
+ elif target_type is None or target_type is Optional:
1353
+ raise_error_container_parameter_missing("Optional")
1354
+
1355
+
1356
+ def check_empty_containers(obj) -> None:
1357
+ if obj == [] or obj == {} or obj == ():
1358
+ warnings.warn(
1359
+ "The inner type of a container is lost when "
1360
+ "calling torch.jit.isinstance in eager mode. For "
1361
+ "example, List[int] would become list and "
1362
+ "therefore falsely return True for List[float] or"
1363
+ " List[str]."
1364
+ )
1365
+
1366
+
1367
+ # supports List/Dict/Tuple and Optional types
1368
+ # TODO support future
1369
+ def container_checker(obj, target_type) -> bool:
1370
+ origin_type = get_origin(target_type)
1371
+ check_args_exist(target_type)
1372
+ if origin_type is None:
1373
+ return False
1374
+ elif origin_type is list or origin_type is List:
1375
+ check_empty_containers(obj)
1376
+ if not isinstance(obj, list):
1377
+ return False
1378
+ arg_type = get_args(target_type)[0]
1379
+ arg_origin = get_origin(arg_type)
1380
+ for el in obj:
1381
+ # check if nested container, ex: List[List[str]]
1382
+ if arg_origin: # processes nested container, ex: List[List[str]]
1383
+ if not container_checker(el, arg_type):
1384
+ return False
1385
+ elif not isinstance(el, arg_type):
1386
+ return False
1387
+ return True
1388
+ elif origin_type is Dict or origin_type is dict:
1389
+ check_empty_containers(obj)
1390
+ if not isinstance(obj, dict):
1391
+ return False
1392
+ key_type = get_args(target_type)[0]
1393
+ val_type = get_args(target_type)[1]
1394
+ for key, val in obj.items():
1395
+ # check if keys are of right type
1396
+ if not isinstance(key, key_type):
1397
+ return False
1398
+ val_origin = get_origin(val_type)
1399
+ if val_origin:
1400
+ if not container_checker(val, val_type):
1401
+ return False
1402
+ elif not isinstance(val, val_type):
1403
+ return False
1404
+ return True
1405
+ elif origin_type is Tuple or origin_type is tuple:
1406
+ check_empty_containers(obj)
1407
+ if not isinstance(obj, tuple):
1408
+ return False
1409
+ arg_types = get_args(target_type)
1410
+ if len(obj) != len(arg_types):
1411
+ return False
1412
+ for el, el_type in zip(obj, arg_types):
1413
+ el_origin = get_origin(el_type)
1414
+ if el_origin:
1415
+ if not container_checker(el, el_type):
1416
+ return False
1417
+ elif not isinstance(el, el_type):
1418
+ return False
1419
+ return True
1420
+ elif origin_type is Union or issubclass(
1421
+ origin_type, BuiltinUnionType
1422
+ ): # also handles Optional
1423
+ if obj is None: # check before recursion because None is always fine
1424
+ return True
1425
+ inner_types = get_args(target_type)
1426
+ for t in inner_types:
1427
+ t_origin = get_origin(t)
1428
+ if t_origin:
1429
+ return container_checker(obj, t)
1430
+ elif isinstance(obj, t):
1431
+ return True
1432
+ return False
1433
+
1434
+
1435
+ def _isinstance(obj, target_type) -> bool:
1436
+ if isinstance(target_type, collections.abc.Container):
1437
+ if not isinstance(target_type, tuple):
1438
+ raise RuntimeError(
1439
+ "The second argument to "
1440
+ "`torch.jit.isinstance` must be a type "
1441
+ "or a tuple of types"
1442
+ )
1443
+ for t_type in target_type:
1444
+ if _isinstance(obj, t_type):
1445
+ return True
1446
+ return False
1447
+
1448
+ origin_type = get_origin(target_type)
1449
+ if origin_type:
1450
+ return container_checker(obj, target_type)
1451
+
1452
+ # Check to handle non-typed optional origin returns as none instead
1453
+ # of as optional in 3.7-3.8
1454
+ check_args_exist(target_type)
1455
+
1456
+ # handle non-containers
1457
+ return isinstance(obj, target_type)
1458
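A small usage sketch of the eager-mode container check implemented by `container_checker` / `_isinstance`; it goes through the public `torch.jit.isinstance` wrapper, and the sample values are illustrative.

    from typing import Dict, List, Optional

    import torch

    # Element types of List/Dict/Optional annotations are validated in eager mode.
    assert torch.jit.isinstance([1, 2, 3], List[int])
    assert not torch.jit.isinstance([1, "a"], List[int])
    assert torch.jit.isinstance({"a": 1.0}, Dict[str, float])
    assert torch.jit.isinstance(None, Optional[int])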
+
1459
+
1460
+ class _TensorExtractor(pickle.Pickler):
1461
+ def __init__(self, *args, tensors: List[torch.Tensor], **kwargs):
1462
+ super().__init__(*args, **kwargs)
1463
+ self.tensors = tensors
1464
+
1465
+ def persistent_id(self, obj):
1466
+ if isinstance(obj, torch.Tensor):
1467
+ self.tensors.append(obj)
1468
+ return ""
1469
+ # Since we just want to extract tensors, we don't mind if an object is
1470
+ # unpicklable if it doesn't contain tensors, as we can just ignore/skip
1471
+ # it. To play it safe, we only do so for common objects that we're sure
1472
+ # don't contain tensors. Feel free to add new types here. Note also that
1473
+ # even if a type isn't listed here this won't block users, since they
1474
+ # can just add a __getstate__ or __reduce__ method to their class.
1475
+ if isinstance(obj, LockType):
1476
+ return ""
1477
+ # Futures and RRefs don't technically contain a value, they just offer
1478
+ # the means to access a value.
1479
+ if isinstance(obj, CFuture) or is_rref_instance(obj):
1480
+ return ""
1481
+ if isinstance(obj, CAwait):
1482
+ return ""
1483
+ if isinstance(obj, torch.cuda.Event):
1484
+ return ""
1485
+ if isinstance(obj, threading.Thread):
1486
+ return ""
1487
+ return None
1488
+
1489
+
1490
+ def _extract_tensors(obj):
1491
+ r"""
1492
+ This function is exclusively called from C++.
1493
+ See ``torch/csrc/jit/python/python_ivalue.h``.
1494
+
1495
+ It extracts the tensors contained in the given object, through pickling.
1496
+ """
1497
+ tensors: List[torch.Tensor] = []
1498
+ extractor = _TensorExtractor(io.BytesIO(), protocol=-1, tensors=tensors)
1499
+ extractor.dump(obj)
1500
+ return tensors
1501
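An illustrative sketch of what the pickling-based extraction yields for a nested Python value. `_extract_tensors` is a private helper normally invoked from C++, so calling it directly as below is only for demonstration; the nested structure is made up.

    import torch
    from torch._jit_internal import _extract_tensors

    t1, t2 = torch.zeros(2), torch.ones(3)
    nested = {"a": [t1, (t2, "metadata")], "b": 42}

    # The pickler walks the whole object graph and records every tensor it sees.
    found = _extract_tensors(nested)
    assert {id(x) for x in found} == {id(t1), id(t2)}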
+
1502
+
1503
+ # In Python-3.11+ typed enums (e.g. IntEnum) retain a number of base class methods in the subclass
1504
+ # that were previously dropped. To preserve the behavior, explicitly drop them here
1505
+
1506
+ if sys.version_info > (3, 10):
1507
+ _drop(enum.Enum.__new__)
1508
+ _drop(enum.Enum.__format__)
1509
+ _drop(enum.Enum.__repr__)
1510
+ _drop(enum.Enum.__str__)
venv/lib/python3.10/site-packages/torch/_lazy/__init__.py ADDED
@@ -0,0 +1,55 @@
1
+ import threading
2
+
3
+ import torch._C._lazy
4
+ from torch.utils._pytree import tree_flatten, tree_unflatten
5
+
6
+ from .closure import add_step_closure, run_step_closures
7
+
8
+
9
+ def mark_step(device: str = "", wait=False):
10
+ """Triggers a mark step, which amounts to
11
+ - collecting a group of 'live' lazy tensors to index into the compilation cache
12
+ (lowering/compiling their IR graphs if not cached)
13
+ - kicking off execution of the compiled function
14
+ - (optionally, wait=True) waiting for cpu-side execution to complete (does not sync the accelerator)
15
+ """
16
+ # TODO(whc) expand this to include backend hooks and align with XLA backend needs
17
+ torch._C._lazy._mark_step(device, [], wait=wait)
18
+
19
+ run_step_closures()
20
+
21
+
22
+ def wait_device_ops(devices=None):
23
+ """Waits for all the async operations on the given devices to complete.
24
+ Args:
25
+ devices (string..., optional): The devices whose async ops need to be waited
26
+ for. If empty, all the local devices will be waited for.
27
+ """
28
+ if devices is None:
29
+ devices = []
30
+ torch._C._lazy._wait_device_ops(devices=devices)
31
+
32
+
33
+ def sync_multi(tensors, devices):
34
+ """
35
+ Sync the list of lazy tensors so their IR gets lowered for the active backend
37
+ and the compiled computation graph gets cached.
37
+ """
38
+ torch._C._lazy._sync_multi(tensors, devices)
39
+
40
+
41
+ def get_tensor_id(tensor):
42
+ """Return a unique id of the lazy tensor maintained by LTC"""
43
+ return torch._C._lazy._get_tensor_id(tensor)
44
+
45
+
46
+ def to_cpu(tensors, devices=None):
47
+ devices = devices or ["lazy"]
48
+
49
+ flattened, spec = tree_flatten(tensors)
50
+ sync_multi(flattened, devices)
51
+ return tree_unflatten([t.to("cpu") for t in flattened], spec)
52
+
53
+
54
+ def save(tensors, *args, **kwargs):
55
+ torch.save(to_cpu(tensors), *args, **kwargs)
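A usage sketch for the helpers above. It assumes the lazy TorchScript backend is available and initialized via `torch._lazy.ts_backend.init()` (defined later in this folder), and that creating tensors on the "lazy" device is supported in the build.

    import torch
    import torch._lazy as lazy
    import torch._lazy.ts_backend

    torch._lazy.ts_backend.init()  # register the TS backend for lazy tensors

    x = torch.randn(4, 4, device="lazy")
    y = (x @ x).relu()

    # Compile + execute the traced graph for all live lazy tensors,
    # then run any queued step closures.
    lazy.mark_step()

    # Sync and copy results back to eager CPU tensors.
    cpu_y = lazy.to_cpu([y])[0]
    print(cpu_y.shape)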
venv/lib/python3.10/site-packages/torch/_lazy/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.37 kB).
 
venv/lib/python3.10/site-packages/torch/_lazy/closure.py ADDED
@@ -0,0 +1,134 @@
1
+ import os
2
+ import threading
3
+ from queue import Empty as EmptyQueue, Queue
4
+
5
+ from torch._lazy.device_context import get_device_context
6
+
7
+
8
+ class ClosureHandler:
9
+ def __init__(self):
10
+ pass
11
+
12
+ def run(self, closure):
13
+ """Run closure function
14
+
15
+ Args:
16
+ closure: callable function to run
17
+ """
18
+ closure()
19
+
20
+ def __call__(self, closures):
21
+ for closure in closures:
22
+ self.run(closure)
23
+
24
+
25
+ class AsyncClosureHandler(ClosureHandler):
26
+ """Handler for Asynchronous Step Closures
27
+ Args:
28
+ max_queue_size: The maximum length of the closure queue after which
29
+ the training loop will block until closures are evaluated.
30
+ By default, the queue is capped at a maximum of 100 entries.
31
+ This value can be set using the `LTC_MAX_ASYNC_QUEUE` environment
32
+ variable.
33
+ """
34
+
35
+ def __init__(self, max_queue_size=100):
36
+ super().__init__()
37
+ self._closure_queue: Queue = Queue(
38
+ int(os.environ.get("LTC_MAX_ASYNC_QUEUE", max_queue_size))
39
+ )
40
+ self._closure_exception: Queue = Queue()
41
+ self._closure_lock = threading.Lock()
42
+ self._closure_event_loop_finished = threading.Event()
43
+ self._closure_event_loop = None
44
+
45
+ def start_event_loop(self):
46
+ """Start closure event loop if not started"""
47
+ if self._closure_event_loop is None:
48
+
49
+ def event_loop():
50
+ # Run loop until closure event is set and closure queue is empty
51
+ while True:
52
+ try:
53
+ closure = self._closure_queue.get(block=True, timeout=3)
54
+ closure()
55
+ self._closure_queue.task_done()
56
+ except EmptyQueue:
57
+ with self._closure_lock:
58
+ if self._closure_queue.empty():
59
+ self._closure_event_loop_finished.set()
60
+ return
61
+ except Exception as e:
62
+ self._closure_exception.put(e)
63
+ return
64
+
65
+ self._closure_event_loop = threading.Thread(target=event_loop)
66
+ self._closure_event_loop.start()
67
+
68
+ def run(self, closure):
69
+ with self._closure_lock:
70
+ self._closure_queue.put(closure, block=True)
71
+ if (
72
+ self._closure_event_loop is None
73
+ or not self._closure_event_loop.is_alive()
74
+ ):
75
+ try:
76
+ e = self._closure_exception.get(block=False)
77
+ raise RuntimeError(
78
+ "Cannot run asynchronous closure due to previously raised exception"
79
+ ) from e
80
+ except EmptyQueue:
81
+ self._closure_event_loop = None
82
+ self.start_event_loop()
83
+
84
+
85
+ def add_step_closure(closure, args=(), run_async=False):
86
+ """Adds a closure to the list of the ones to be run at the end of the step.
87
+ Many times during model training there is the need to print/report (print to
88
+ console, post to tensorboard, etc...) information which requires the content of
89
+ intermediary tensors to be inspected.
90
+ Inspecting different tensors' content at different points of the model code
91
+ requires many executions and typically causes performance issues.
92
+ Adding a step closure will ensure that it will be run after the barrier, when
93
+ all the live tensors will be already materialized to device data.
94
+ Live tensors which will include the ones captured by the closure arguments.
95
+ So using `add_step_closure()` will ensure a single execution will be
96
+ performed, even when multiple closures are queued, requiring multiple tensors
97
+ to be inspected.
98
+ Step closures will be run sequentially in the order they have been queued.
99
+ Note that even though using this API the execution will be optimized, it is
100
+ advised to throttle the printing/reporting events once every N steps.
101
+ Args:
102
+ closure (callable): The function to be called.
103
+ args (tuple): The arguments to be passed to the closure.
104
+ run_async: If True, run the closure asynchronously.
105
+ """
106
+ devctx = get_device_context()
107
+ closures_type = "async_step_closures" if run_async else "step_closures"
108
+ step_closures = getattr(devctx, closures_type, None)
109
+ if step_closures is None:
110
+ step_closures = []
111
+ setattr(devctx, closures_type, step_closures)
112
+ step_closures.append(lambda a=args: closure(*a))
113
+
114
+
115
+ def run_step_closures():
116
+ devctx = get_device_context()
117
+ async_step_closures = getattr(devctx, "async_step_closures", None)
118
+ if async_step_closures is not None:
119
+ devctx.async_step_closures = []
120
+ async_closure_handler = getattr(devctx, "async_closure_handler", None)
121
+ if async_closure_handler is None:
122
+ async_closure_handler = AsyncClosureHandler()
123
+ devctx.async_closure_handler = async_closure_handler
124
+ async_closure_handler(async_step_closures)
125
+
126
+ step_closures = getattr(devctx, "step_closures", None)
127
+ if step_closures is not None:
128
+ devctx.step_closures = []
129
+ closure_handler = getattr(devctx, "closure_handler", None)
130
+ if closure_handler is None:
131
+ closure_handler = ClosureHandler()
132
+ devctx.closure_handler = closure_handler
133
+ closure_handler(step_closures)
134
+ return devctx
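A sketch of how a training loop might use the step-closure machinery above; the loss tensor and the `report` function are placeholders, and the lazy device/backend setup from `torch._lazy` is assumed.

    import torch
    import torch._lazy as lazy
    from torch._lazy.closure import add_step_closure

    def report(step, loss):
        # Runs after the barrier, once `loss` has been materialized.
        print(f"step {step}: loss={loss.item():.4f}")

    for step in range(3):
        loss = torch.randn((), device="lazy")  # stand-in for a real loss
        add_step_closure(report, args=(step, loss))
        lazy.mark_step()  # executes the graph, then fires run_step_closures()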
venv/lib/python3.10/site-packages/torch/_lazy/computation.py ADDED
@@ -0,0 +1,26 @@
1
+ import torch._C._lazy
2
+ import torch._C._lazy_ts_backend
3
+
4
+
5
+ def get_tensors_ts_device_data_node(tensors):
6
+ """Return tensor ids and eager tensors for DeviceData nodes in the
7
+ IR for the passed in lazy tensors.
8
+
9
+ TODO: This API is currently ts backend specific. We are working on
10
+ generalizing it to all backends including XLA.
11
+ """
12
+ return torch._C._lazy_ts_backend._get_tensors_ts_device_data_node(tensors)
13
+
14
+
15
+ def get_graph_hash(tensors):
16
+ """Return the graph hash for the passed in lazy tensors"""
17
+ return torch._C._lazy._get_graph_hash(tensors)
18
+
19
+
20
+ def run_cached_graph(hash_str, graph_inputs):
21
+ """Running the cached computation graph with the given inputs
22
+
23
+ TODO: This API is currently ts backend specific. We are working on
24
+ generalizing it to all backends including XLA.
25
+ """
26
+ return torch._C._lazy_ts_backend._run_cached_graph(hash_str, graph_inputs)
venv/lib/python3.10/site-packages/torch/_lazy/config.py ADDED
@@ -0,0 +1,16 @@
1
+ import torch._C._lazy
2
+
3
+
4
+ def get_force_fallback():
5
+ """Get the config used to force LTC fallback"""
6
+ return torch._C._lazy._get_force_fallback()
7
+
8
+
9
+ def set_force_fallback(configval):
10
+ """Set the config used to force LTC fallback"""
11
+ torch._C._lazy._set_force_fallback(configval)
12
+
13
+
14
+ def set_reuse_ir(val: bool):
15
+ """Set the config to reuse IR nodes for faster tracing"""
16
+ torch._C._lazy._set_reuse_ir(val)
venv/lib/python3.10/site-packages/torch/_lazy/debug.py ADDED
@@ -0,0 +1,21 @@
1
+ import torch._C._lazy
2
+
3
+
4
+ def render_ir_graph(tensors):
5
+ """Return a text dump of the LTC IR graph in dot format for the tensors.
6
+ The text can be processed by tools like dot to be rendered in pdf,png etc."""
7
+ return torch._C._lazy._get_tensors_dot(tensors)
8
+
9
+
10
+ def dump_ir(tensors, ir_format):
11
+ """Return a dump of the tensors in the specified format.
12
+ Valid format are
13
+ - text: for LTC IR
14
+ - backend: for the active backend IR
15
+ """
16
+ if ir_format == "text":
17
+ return torch._C._lazy._get_tensors_text(tensors)
18
+ elif ir_format == "backend":
19
+ return torch._C._lazy._get_tensors_backend(tensors)
20
+ else:
21
+ raise RuntimeError(f"Unrecognized IR format: {ir_format}")
venv/lib/python3.10/site-packages/torch/_lazy/device_context.py ADDED
@@ -0,0 +1,25 @@
1
+ import threading
2
+ from typing import Any, Dict
3
+
4
+ import torch._C._lazy
5
+
6
+
7
+ class DeviceContext:
8
+ _CONTEXTS: Dict[str, Any] = dict()
9
+ _CONTEXTS_LOCK = threading.Lock()
10
+
11
+ def __init__(self, device):
12
+ self.device = device
13
+
14
+
15
+ def get_device_context(device=None):
16
+ if device is None:
17
+ device = torch._C._lazy._get_default_device_type()
18
+ else:
19
+ device = str(device)
20
+ with DeviceContext._CONTEXTS_LOCK:
21
+ devctx = DeviceContext._CONTEXTS.get(device, None)
22
+ if devctx is None:
23
+ devctx = DeviceContext(device)
24
+ DeviceContext._CONTEXTS[device] = devctx
25
+ return devctx
venv/lib/python3.10/site-packages/torch/_lazy/extract_compiled_graph.py ADDED
@@ -0,0 +1,223 @@
1
+ import copy
2
+ import dataclasses
3
+ import itertools
4
+ import os
5
+ from typing import Any, Callable, Dict, List
6
+
7
+ import torch
8
+ import torch._lazy as lazy
9
+ import torch._lazy.metrics as metrics
10
+ from torch import fx
11
+ from torch._lazy import computation, debug as lazy_debug
12
+ from torch._lazy.tensor_factory_functions import tensor_factory_functions
13
+
14
+ debug = os.environ.get("debug_extract_compiled_graph") is not None
15
+
16
+
17
+ @dataclasses.dataclass
18
+ class GraphInputMatcher:
19
+ """
20
+ The GraphInputMatcher class sets up the graph inputs for future calls after lazy tracing.
21
+ Specifically, those graph inputs corresponding to method parameters should be replaced with the
22
+ arguments for the current call.
23
+
24
+ tensor_id_to_arg_idx maps the tensor id to the parameter index.
25
+ graph_input_tensor_ids, graph_input_ivalues list the tensor_id and ivalue for each of the
26
+ TS/XLA graph inputs.
27
+ """
28
+
29
+ tensor_id_to_arg_idx: Dict[int, int]
30
+ graph_input_tensor_ids: List[int]
31
+ # there are 2 categories of graph_input_tensors.
32
+ # Category 1: those whose ids are not found in tensor_id_to_arg_idx. These are
34
+ # most likely const tensors and we can get their content from graph_input_tensors
35
+ # Category 2: those whose ids are found in tensor_id_to_arg_idx. We should get
35
+ # the tensor from method arguments
36
+ graph_input_ivalues: List[Any]
37
+
38
+ # get the real graph input tensors
39
+ def __call__(self, args):
40
+ real_input = []
41
+ for tensor_id, traced_ivalue in zip(
42
+ self.graph_input_tensor_ids, self.graph_input_ivalues
43
+ ):
44
+ arg_idx = self.tensor_id_to_arg_idx.get(tensor_id, None)
45
+ if arg_idx is None:
46
+ inp = traced_ivalue
47
+ else:
48
+ inp = args[arg_idx]
49
+ real_input.append(inp)
50
+ return real_input
51
+
52
+
53
+ class ReturnValueHandler:
54
+ r"""
55
+ When ltc_sync_multi is called on multiple tensors, the compiled graph
56
+ will contain output only for unique tensors - if a tensor appears multiple
57
+ times in the input to _ltc_sync_multi, only the first occurrence matters.
58
+
59
+ However, from the Python level, we still expect multiple tensors returned with duplication
60
+ even if the TS graph dedups the output, e.g. for the method:
61
+
62
+ def forward(self, a):
63
+ return a, a
64
+
65
+ the TS graph captured by LTC will return a single tensor, but Python method expects 2.
66
+
67
+ This class dedups the lazy tensors first to get the indices that will be used
68
+ to duplicate the eager tensors later.
69
+ """
70
+
71
+ def __init__(self, lazy_out_list):
72
+ self.index: List[List[int]] = []
73
+ self.total_count = len(lazy_out_list)
74
+
75
+ tensor_id_to_idx: Dict[int, int] = {}
76
+ for dup_idx, lazy_tensor in enumerate(lazy_out_list):
77
+ uniq_idx = tensor_id_to_idx.get(id(lazy_tensor), None)
78
+ if uniq_idx is not None:
79
+ self.index[uniq_idx].append(dup_idx)
80
+ else:
81
+ uniq_idx = len(self.index)
82
+ self.index.append([dup_idx])
83
+ tensor_id_to_idx[id(lazy_tensor)] = uniq_idx
84
+
85
+ def duplicate_eager_tensors(self, eager_tensor_list):
86
+ duplicated_list = [None] * self.total_count
87
+ assert len(eager_tensor_list) == len(self.index)
88
+
89
+ for uniq_idx, eager_tensor in enumerate(eager_tensor_list):
90
+ for dup_idx in self.index[uniq_idx]:
91
+ duplicated_list[dup_idx] = eager_tensor
92
+ return duplicated_list
93
+
94
+
95
+ def force_lazy_device(model: fx.GraphModule):
96
+ """
97
+ Factory methods in an Fx graph may create tensors for specific eager devices.
98
+ If we take no action, those eager tensors will be mixed with lazy tensors and
99
+ cause a crash. This method overwrites those eager devices with the lazy device.
100
+ """
101
+
102
+ def tolazydevice(dev):
103
+ if isinstance(dev, torch.device):
104
+ return torch.device("lazy", index=dev.index)
105
+ return dev
106
+
107
+ def hasDeviceArg(args, kwargs):
108
+ return any(
109
+ isinstance(arg, torch.device)
110
+ for arg in itertools.chain(args, kwargs.values())
111
+ )
112
+
113
+ for nd in model.graph.nodes:
114
+ nd.args = tuple(tolazydevice(arg) for arg in nd.args)
115
+ nd.kwargs = {k: tolazydevice(v) for k, v in nd.kwargs.items()}
116
+
117
+ # For torchbench models like yolov3 and hf_Bart, dynamo generates Fx graphs that return
118
+ # eager tensors on the default device
119
+ # (check https://gist.github.com/shunting314/eabdf6c769c59bc384469717b8f9bb7f for yolov3,
120
+ # and https://gist.github.com/shunting314/8d5e2d9348a3258959d3954186c48814 for hf_Bart).
121
+ # To force those tensors on the lazy device, we can not simply override
122
+ # the device argument since there is no explicit device argument.
123
+ # What we are doing here is, for the list of covered tensor factory methods
124
+ # we add a lazy device argument explicitly.
125
+ #
126
+ # TODO: This solution is not ideal since we may miss some factory methods. In the future
127
+ # when we support lazy mode, this method can be replaced by that.
128
+ if nd.target in tensor_factory_functions and not hasDeviceArg(
129
+ nd.args, nd.kwargs
130
+ ):
131
+ kwargs = dict(nd.kwargs) # nd.kwargs is immutable. make a mutable copy.
132
+ kwargs["device"] = torch.device("lazy")
133
+ nd.kwargs = kwargs
134
+
135
+ model.recompile()
136
+
137
+
138
+ def get_fallback_ops():
139
+ fallback_ops = []
140
+ for opname in metrics.counter_names():
141
+ if "aten::" not in opname:
142
+ continue
143
+ val = int(metrics.counter_value(opname))
144
+ if val > 0:
145
+ fallback_ops.append(f"{opname}={val}")
146
+
147
+ return fallback_ops
148
+
149
+
150
+ def extract_compiled_graph(model: fx.GraphModule, example_inputs) -> Callable:
151
+ """
152
+ Optimize an eager model with LTC and returns a wrapper to execute the
153
+ compiled graph directly without retracing. It depends on other mechanisms
154
+ like TorchDynamo guards to guarantee the returned wrapper is only called
155
+ when it's safe.
156
+ """
157
+ lazy_args = [arg.to(device="lazy") for arg in example_inputs]
158
+ args_tensor_ids = [lazy.get_tensor_id(lazy_arg) for lazy_arg in lazy_args]
159
+ tensor_id_to_arg_idx = {tensor_id: i for i, tensor_id in enumerate(args_tensor_ids)}
160
+ lazy_model = copy.deepcopy(model).to(device=torch.device("lazy"))
161
+ force_lazy_device(lazy_model)
162
+
163
+ # This line executes lazy tracing and enables us to extract the compiled graph later
164
+ metrics.reset()
165
+ lazy_out = lazy_model(*lazy_args)
166
+ fallback_ops = get_fallback_ops()
167
+ metrics.reset()
168
+
169
+ if len(fallback_ops) > 0:
170
+ raise RuntimeError(
171
+ f"Fail to extact the compiled graph because of fallback: {','.join(fallback_ops)}"
172
+ )
173
+
174
+ if not isinstance(lazy_out, (tuple, list)):
175
+ lazy_out = (lazy_out,)
176
+
177
+ args_and_out = tuple(lazy_args) + tuple(lazy_out)
178
+ return_value_handler = ReturnValueHandler(args_and_out)
179
+ if debug:
180
+ print("Fx code:\n", model.code)
181
+ print("LTC IR:", lazy_debug.dump_ir(args_and_out, "text"))
182
+
183
+ # TODO: this part is TS backend specific for now and will be generalized to
184
+ # support XLA
185
+ (
186
+ graph_input_tensor_ids,
187
+ graph_input_ivalues,
188
+ ) = computation.get_tensors_ts_device_data_node(args_and_out)
189
+ assert len(graph_input_tensor_ids) == len(graph_input_ivalues)
190
+ graph_input_matcher = GraphInputMatcher(
191
+ tensor_id_to_arg_idx, graph_input_tensor_ids, graph_input_ivalues
192
+ )
193
+
194
+ graph_hash = computation.get_graph_hash(args_and_out)
195
+
196
+ if debug:
197
+ print("graph_hash", graph_hash)
198
+ print(f"args_tensor_ids {args_tensor_ids}")
199
+ print("tensor ids from device data:", graph_input_tensor_ids)
200
+
201
+ # sync the list of output tensors so the computation graph for these
202
+ # tensors will be cached. Those computation graphs can be retrieved
203
+ # by graph hash later.
204
+ lazy.sync_multi(args_and_out, [])
205
+
206
+ def optimized_mod(*args):
207
+ if len(args_and_out) == 0:
208
+ return ()
209
+ graph_input = graph_input_matcher(args)
210
+ res = return_value_handler.duplicate_eager_tensors(
211
+ computation.run_cached_graph(graph_hash, graph_input)
212
+ )
213
+
214
+ assert len(res) == len(args_and_out)
215
+ for i, arg in enumerate(args):
216
+ # only copy those tensors that get inplace updated
217
+ if arg is not res[i]:
218
+ arg.copy_(res[i])
219
+
220
+ # skip the args
221
+ return res[len(args) :]
222
+
223
+ return optimized_mod
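A minimal sketch of calling `extract_compiled_graph` on a toy `fx.GraphModule`. It assumes the lazy TS backend is initialized and that every op in the traced graph is supported (otherwise the fallback check above raises); the `Toy` module is made up.

    import torch
    import torch.fx as fx
    import torch._lazy.ts_backend
    from torch._lazy.extract_compiled_graph import extract_compiled_graph

    torch._lazy.ts_backend.init()

    class Toy(torch.nn.Module):
        def forward(self, x):
            return (x + 1).relu()

    example_inputs = [torch.randn(4)]
    gm = fx.symbolic_trace(Toy())

    # Trace once through LTC, cache the compiled graph, and get a wrapper that
    # replays it without retracing.
    optimized = extract_compiled_graph(gm, example_inputs)
    print(optimized(*example_inputs))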
venv/lib/python3.10/site-packages/torch/_lazy/ir_cache.py ADDED
@@ -0,0 +1,13 @@
1
+ import torch._C._lazy
2
+
3
+
4
+ def dump(dot_file_name: str):
5
+ """Dump TrieCache in the dot format"""
6
+ return torch._C._lazy._dump_ir_cache(dot_file_name)
7
+
8
+
9
+ def reset():
10
+ """Clear TrieCache. This is needed in testing to avoid
11
+ node reusing between different tests.
12
+ """
13
+ return torch._C._lazy._clear_ir_cache()
venv/lib/python3.10/site-packages/torch/_lazy/metrics.py ADDED
@@ -0,0 +1,21 @@
1
+ import torch._C._lazy
2
+
3
+
4
+ def reset():
5
+ """Resets all metric counters."""
6
+ torch._C._lazy._reset_metrics()
7
+
8
+
9
+ def counter_names():
10
+ """Retrieves all the currently active counter names."""
11
+ return torch._C._lazy._counter_names()
12
+
13
+
14
+ def counter_value(name: str):
15
+ """Return the value of the counter with the speficied name"""
16
+ return torch._C._lazy._counter_value(name)
17
+
18
+
19
+ def metrics_report():
20
+ """Return the combined (lazy core and backend) metric report"""
21
+ return torch._C._lazy._metrics_report()
venv/lib/python3.10/site-packages/torch/_lazy/tensor_factory_functions.py ADDED
@@ -0,0 +1,48 @@
1
+ import torch
2
+
3
+ """
4
+ tensor_factory_functions defines the list of torch functions that create tensors.
5
+ The list is grabbed by searching through native_functions.yaml with the following
6
+ regular expression:
7
+
8
+ cat native_functions.yaml | grep 'func:' | grep -v "Tensor.*->" | grep "[-]>.*Tensor"
9
+
10
+ It's possible that new tensor factory functions are added making this list stale.
11
+ Use at your own risk or regenerate the list.
12
+ """
13
+ tensor_factory_functions = (
14
+ torch._cudnn_init_dropout_state,
15
+ torch.arange,
16
+ torch.bartlett_window,
17
+ torch.blackman_window,
18
+ torch._empty_affine_quantized,
19
+ torch.empty_strided,
20
+ torch.eye,
21
+ torch.full,
22
+ torch.from_file,
23
+ torch.hann_window,
24
+ torch.hamming_window,
25
+ torch.kaiser_window,
26
+ torch.linspace,
27
+ torch.logspace,
28
+ torch.ones,
29
+ torch.scalar_tensor,
30
+ torch.rand,
31
+ torch.randint,
32
+ torch.randn,
33
+ torch.randperm,
34
+ torch.range,
35
+ torch._efficientzerotensor,
36
+ torch.zeros,
37
+ torch.tril_indices,
38
+ torch.triu_indices,
39
+ # Note: the following functions match the regular expression search above but
40
+ # they are not available in the torch module. Commented out.
41
+ # torch._sparse_coo_tensor_with_dims,
42
+ # torch.fft_fftfreq,
43
+ # torch.fft_rfftfreq,
44
+ ) + (
45
+ # torch.tensor is special since it's not in native_functions.yaml
46
+ # add it separately
47
+ torch.tensor,
48
+ )
venv/lib/python3.10/site-packages/torch/_lazy/ts_backend.py ADDED
@@ -0,0 +1,6 @@
1
+ import torch._C._lazy_ts_backend
2
+
3
+
4
+ def init():
5
+ """Initializes the lazy Torchscript backend"""
6
+ torch._C._lazy_ts_backend._init()
venv/lib/python3.10/site-packages/torch/_linalg_utils.py ADDED
@@ -0,0 +1,164 @@
1
+ """Various linear algebra utility methods for internal use.
2
+
3
+ """
4
+
5
+ from typing import Optional, Tuple
6
+
7
+ import torch
8
+ from torch import Tensor
9
+
10
+
11
+ def is_sparse(A):
12
+ """Check if tensor A is a sparse tensor"""
13
+ if isinstance(A, torch.Tensor):
14
+ return A.layout == torch.sparse_coo
15
+
16
+ error_str = "expected Tensor"
17
+ if not torch.jit.is_scripting():
18
+ error_str += f" but got {type(A)}"
19
+ raise TypeError(error_str)
20
+
21
+
22
+ def get_floating_dtype(A):
23
+ """Return the floating point dtype of tensor A.
24
+
25
+ Integer types map to float32.
26
+ """
27
+ dtype = A.dtype
28
+ if dtype in (torch.float16, torch.float32, torch.float64):
29
+ return dtype
30
+ return torch.float32
31
+
32
+
33
+ def matmul(A: Optional[Tensor], B: Tensor) -> Tensor:
34
+ """Multiply two matrices.
35
+
36
+ If A is None, return B. A can be sparse or dense. B is always
37
+ dense.
38
+ """
39
+ if A is None:
40
+ return B
41
+ if is_sparse(A):
42
+ return torch.sparse.mm(A, B)
43
+ return torch.matmul(A, B)
44
+
45
+
46
+ def conjugate(A):
47
+ """Return conjugate of tensor A.
48
+
49
+ .. note:: If A's dtype is not complex, A is returned.
50
+ """
51
+ if A.is_complex():
52
+ return A.conj()
53
+ return A
54
+
55
+
56
+ def transpose(A):
57
+ """Return transpose of a matrix or batches of matrices."""
58
+ ndim = len(A.shape)
59
+ return A.transpose(ndim - 1, ndim - 2)
60
+
61
+
62
+ def transjugate(A):
63
+ """Return transpose conjugate of a matrix or batches of matrices."""
64
+ return conjugate(transpose(A))
65
+
66
+
67
+ def bform(X: Tensor, A: Optional[Tensor], Y: Tensor) -> Tensor:
68
+ """Return bilinear form of matrices: :math:`X^T A Y`."""
69
+ return matmul(transpose(X), matmul(A, Y))
70
+
71
+
72
+ def qform(A: Optional[Tensor], S: Tensor):
73
+ """Return quadratic form :math:`S^T A S`."""
74
+ return bform(S, A, S)
75
+
76
+
77
+ def basis(A):
78
+ """Return orthogonal basis of A columns."""
79
+ return torch.linalg.qr(A).Q
80
+
81
+
82
+ def symeig(A: Tensor, largest: Optional[bool] = False) -> Tuple[Tensor, Tensor]:
83
+ """Return eigenpairs of A with specified ordering."""
84
+ if largest is None:
85
+ largest = False
86
+ E, Z = torch.linalg.eigh(A, UPLO="U")
87
+ # assuming that E is ordered
88
+ if largest:
89
+ E = torch.flip(E, dims=(-1,))
90
+ Z = torch.flip(Z, dims=(-1,))
91
+ return E, Z
92
+
93
+
94
+ # These functions were deprecated and removed
95
+ # This nice error message can be removed in version 1.13+
96
+ def matrix_rank(input, tol=None, symmetric=False, *, out=None) -> Tensor:
97
+ raise RuntimeError(
98
+ "This function was deprecated since version 1.9 and is now removed.\n"
99
+ "Please use the `torch.linalg.matrix_rank` function instead. "
100
+ "The parameter 'symmetric' was renamed in `torch.linalg.matrix_rank()` to 'hermitian'."
101
+ )
102
+
103
+
104
+ def solve(input: Tensor, A: Tensor, *, out=None) -> Tuple[Tensor, Tensor]:
105
+ raise RuntimeError(
106
+ "This function was deprecated since version 1.9 and is now removed. "
107
+ "`torch.solve` is deprecated in favor of `torch.linalg.solve`. "
108
+ "`torch.linalg.solve` has its arguments reversed and does not return the LU factorization.\n\n"
109
+ "To get the LU factorization see `torch.lu`, which can be used with `torch.lu_solve` or `torch.lu_unpack`.\n"
110
+ "X = torch.solve(B, A).solution "
111
+ "should be replaced with:\n"
112
+ "X = torch.linalg.solve(A, B)"
113
+ )
114
+
115
+
116
+ def lstsq(input: Tensor, A: Tensor, *, out=None) -> Tuple[Tensor, Tensor]:
117
+ raise RuntimeError(
118
+ "This function was deprecated since version 1.9 and is now removed. "
119
+ "`torch.lstsq` is deprecated in favor of `torch.linalg.lstsq`.\n"
120
+ "`torch.linalg.lstsq` has reversed arguments and does not return the QR decomposition in "
121
+ "the returned tuple (although it returns other information about the problem).\n\n"
122
+ "To get the QR decomposition consider using `torch.linalg.qr`.\n\n"
123
+ "The returned solution in `torch.lstsq` stored the residuals of the solution in the "
124
+ "last m - n columns of the returned value whenever m > n. In torch.linalg.lstsq, "
125
+ "the residuals are in the field 'residuals' of the returned named tuple.\n\n"
126
+ "The unpacking of the solution, as in\n"
127
+ "X, _ = torch.lstsq(B, A).solution[:A.size(1)]\n"
128
+ "should be replaced with:\n"
129
+ "X = torch.linalg.lstsq(A, B).solution"
130
+ )
131
+
132
+
133
+ def _symeig(
134
+ input, eigenvectors=False, upper=True, *, out=None
135
+ ) -> Tuple[Tensor, Tensor]:
136
+ raise RuntimeError(
137
+ "This function was deprecated since version 1.9 and is now removed. "
138
+ "The default behavior has changed from using the upper triangular portion of the matrix by default "
139
+ "to using the lower triangular portion.\n\n"
140
+ "L, _ = torch.symeig(A, upper=upper) "
141
+ "should be replaced with:\n"
142
+ "L = torch.linalg.eigvalsh(A, UPLO='U' if upper else 'L')\n\n"
143
+ "and\n\n"
144
+ "L, V = torch.symeig(A, eigenvectors=True) "
145
+ "should be replaced with:\n"
146
+ "L, V = torch.linalg.eigh(A, UPLO='U' if upper else 'L')"
147
+ )
148
+
149
+
150
+ def eig(
151
+ self: Tensor, eigenvectors: bool = False, *, e=None, v=None
152
+ ) -> Tuple[Tensor, Tensor]:
153
+ raise RuntimeError(
154
+ "This function was deprecated since version 1.9 and is now removed. "
155
+ "`torch.linalg.eig` returns complex tensors of dtype `cfloat` or `cdouble` rather than real tensors "
156
+ "mimicking complex tensors.\n\n"
157
+ "L, _ = torch.eig(A) "
158
+ "should be replaced with:\n"
159
+ "L_complex = torch.linalg.eigvals(A)\n\n"
160
+ "and\n\n"
161
+ "L, V = torch.eig(A, eigenvectors=True) "
162
+ "should be replaced with:\n"
163
+ "L_complex, V_complex = torch.linalg.eig(A)"
164
+ )
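These helpers are internal (hence the leading underscore), but a short sketch of how `matmul`, `qform`, and `symeig` compose may clarify their intent; the import path simply follows the file added above:

import torch
from torch import _linalg_utils as _utils

A = torch.randn(5, 5)
A = (A + A.mT) / 2                      # symmetrize A
S = torch.randn(5, 3)

M = _utils.qform(A, S)                  # S^T A S, a 3x3 symmetric matrix
E, Z = _utils.symeig(M)                 # eigenvalues in ascending order
E_desc, Z_desc = _utils.symeig(M, largest=True)  # same pairs, descending order
print(E, E_desc)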
venv/lib/python3.10/site-packages/torch/_lobpcg.py ADDED
@@ -0,0 +1,1167 @@
1
+ """Locally Optimal Block Preconditioned Conjugate Gradient methods.
2
+ """
3
+ # Author: Pearu Peterson
4
+ # Created: February 2020
5
+
6
+ from typing import Dict, Optional, Tuple
7
+
8
+ import torch
9
+ from torch import Tensor
10
+ from . import _linalg_utils as _utils
11
+ from .overrides import handle_torch_function, has_torch_function
12
+
13
+
14
+ __all__ = ["lobpcg"]
15
+
16
+
17
+ def _symeig_backward_complete_eigenspace(D_grad, U_grad, A, D, U):
18
+ # compute F, such that F_ij = (d_j - d_i)^{-1} for i != j, F_ii = 0
19
+ F = D.unsqueeze(-2) - D.unsqueeze(-1)
20
+ F.diagonal(dim1=-2, dim2=-1).fill_(float("inf"))
21
+ F.pow_(-1)
22
+
23
+ # A.grad = U (D.grad + (U^T U.grad * F)) U^T
24
+ Ut = U.mT.contiguous()
25
+ res = torch.matmul(
26
+ U, torch.matmul(torch.diag_embed(D_grad) + torch.matmul(Ut, U_grad) * F, Ut)
27
+ )
28
+
29
+ return res
30
+
31
+
32
+ def _polynomial_coefficients_given_roots(roots):
33
+ """
34
+ Given the `roots` of a polynomial, find the polynomial's coefficients.
35
+
36
+ If roots = (r_1, ..., r_n), then the method returns
37
+ coefficients (a_0, a_1, ..., a_n (== 1)) so that
38
+ p(x) = (x - r_1) * ... * (x - r_n)
39
+ = x^n + a_{n-1} * x^{n-1} + ... + a_1 * x + a_0
40
+
41
+ Note: better performance would require writing a low-level kernel
42
+ """
43
+ poly_order = roots.shape[-1]
44
+ poly_coeffs_shape = list(roots.shape)
45
+ # we assume p(x) = x^n + a_{n-1} * x^{n-1} + ... + a_1 * x + a_0,
46
+ # so poly_coeffs = {a_0, ..., a_n, a_{n+1}(== 1)},
47
+ # but we insert one extra coefficient to enable better vectorization below
48
+ poly_coeffs_shape[-1] += 2
49
+ poly_coeffs = roots.new_zeros(poly_coeffs_shape)
50
+ poly_coeffs[..., 0] = 1
51
+ poly_coeffs[..., -1] = 1
52
+
53
+ # perform the Horner's rule
54
+ for i in range(1, poly_order + 1):
55
+ # note that it is computationally hard to compute backward for this method,
56
+ # because then given the coefficients it would require finding the roots and/or
57
+ # calculating the sensitivity based on the Vieta's theorem.
58
+ # So the code below tries to circumvent the explicit root finding by series
59
+ # of operations on memory copies imitating the Horner's method.
60
+ # The memory copies are required to construct nodes in the computational graph
61
+ # by exploiting the explicit (not in-place, separate node for each step)
62
+ # recursion of the Horner's method.
63
+ # Needs more memory, O(... * k^2), but with only O(... * k^2) complexity.
64
+ poly_coeffs_new = poly_coeffs.clone() if roots.requires_grad else poly_coeffs
65
+ out = poly_coeffs_new.narrow(-1, poly_order - i, i + 1)
66
+ out -= roots.narrow(-1, i - 1, 1) * poly_coeffs.narrow(
67
+ -1, poly_order - i + 1, i + 1
68
+ )
69
+ poly_coeffs = poly_coeffs_new
70
+
71
+ return poly_coeffs.narrow(-1, 1, poly_order + 1)
72
+
73
+
74
+ def _polynomial_value(poly, x, zero_power, transition):
75
+ """
76
+ A generic method for computing poly(x) using the Horner's rule.
77
+
78
+ Args:
79
+ poly (Tensor): the (possibly batched) 1D Tensor representing
80
+ polynomial coefficients such that
81
+ poly[..., i] = (a_{i_0}, ..., a{i_n} (==1)), and
82
+ poly(x) = poly[..., 0] * zero_power + ... + poly[..., n] * x^n
83
+
84
+ x (Tensor): the value (possibly batched) to evaluate the polynomial `poly` at.
85
+
86
+ zero_power (Tensor): the representation of `x^0`. It is application-specific.
87
+
88
+ transition (Callable): the function that accepts some intermediate result `int_val`,
89
+ the `x` and a specific polynomial coefficient
90
+ `poly[..., k]` for some iteration `k`.
91
+ It basically performs one iteration of the Horner's rule
92
+ defined as `x * int_val + poly[..., k] * zero_power`.
93
+ Note that `zero_power` is not a parameter,
94
+ because the step `+ poly[..., k] * zero_power` depends on `x`,
95
+ whether it is a vector, a matrix, or something else, so this
96
+ functionality is delegated to the user.
97
+ """
98
+
99
+ res = zero_power.clone()
100
+ for k in range(poly.size(-1) - 2, -1, -1):
101
+ res = transition(res, x, poly[..., k])
102
+ return res
103
+
104
+
105
+ def _matrix_polynomial_value(poly, x, zero_power=None):
106
+ """
107
+ Evaluates `poly(x)` for the (batched) matrix input `x`.
108
+ Check out `_polynomial_value` function for more details.
109
+ """
110
+
111
+ # matrix-aware Horner's rule iteration
112
+ def transition(curr_poly_val, x, poly_coeff):
113
+ res = x.matmul(curr_poly_val)
114
+ res.diagonal(dim1=-2, dim2=-1).add_(poly_coeff.unsqueeze(-1))
115
+ return res
116
+
117
+ if zero_power is None:
118
+ zero_power = torch.eye(
119
+ x.size(-1), x.size(-1), dtype=x.dtype, device=x.device
120
+ ).view(*([1] * len(list(x.shape[:-2]))), x.size(-1), x.size(-1))
121
+
122
+ return _polynomial_value(poly, x, zero_power, transition)
123
+
124
+
125
+ def _vector_polynomial_value(poly, x, zero_power=None):
126
+ """
127
+ Evaluates `poly(x)` for the (batched) vector input `x`.
128
+ Check out `_polynomial_value` function for more details.
129
+ """
130
+
131
+ # vector-aware Horner's rule iteration
132
+ def transition(curr_poly_val, x, poly_coeff):
133
+ res = torch.addcmul(poly_coeff.unsqueeze(-1), x, curr_poly_val)
134
+ return res
135
+
136
+ if zero_power is None:
137
+ zero_power = x.new_ones(1).expand(x.shape)
138
+
139
+ return _polynomial_value(poly, x, zero_power, transition)
140
+
141
+
142
+ def _symeig_backward_partial_eigenspace(D_grad, U_grad, A, D, U, largest):
143
+ # compute a projection operator onto the subspace orthogonal to the
144
+ # columns of U, defined as (I - UU^T)
145
+ Ut = U.mT.contiguous()
146
+ proj_U_ortho = -U.matmul(Ut)
147
+ proj_U_ortho.diagonal(dim1=-2, dim2=-1).add_(1)
148
+
149
+ # compute U_ortho, a basis for the orthogonal complement to the span(U),
150
+ # by projecting a random [..., m, m - k] matrix onto the orthogonal
151
+ # complement of the subspace spanned by the columns of U.
152
+ #
153
+ # fix generator for determinism
154
+ gen = torch.Generator(A.device)
155
+
156
+ # orthogonal complement to the span(U)
157
+ U_ortho = proj_U_ortho.matmul(
158
+ torch.randn(
159
+ (*A.shape[:-1], A.size(-1) - D.size(-1)),
160
+ dtype=A.dtype,
161
+ device=A.device,
162
+ generator=gen,
163
+ )
164
+ )
165
+ U_ortho_t = U_ortho.mT.contiguous()
166
+
167
+ # compute the coefficients of the characteristic polynomial of the tensor D.
168
+ # Note that D is diagonal, so the diagonal elements are exactly the roots
169
+ # of the characteristic polynomial.
170
+ chr_poly_D = _polynomial_coefficients_given_roots(D)
171
+
172
+ # the code below finds the explicit solution to the Sylvester equation
173
+ # U_ortho^T A U_ortho dX - dX D = -U_ortho^T A U
174
+ # and incorporates it into the whole gradient stored in the `res` variable.
175
+ #
176
+ # Equivalent to the following naive implementation:
177
+ # res = A.new_zeros(A.shape)
178
+ # p_res = A.new_zeros(*A.shape[:-1], D.size(-1))
179
+ # for k in range(1, chr_poly_D.size(-1)):
180
+ # p_res.zero_()
181
+ # for i in range(0, k):
182
+ # p_res += (A.matrix_power(k - 1 - i) @ U_grad) * D.pow(i).unsqueeze(-2)
183
+ # res -= chr_poly_D[k] * (U_ortho @ poly_D_at_A.inverse() @ U_ortho_t @ p_res @ U.t())
184
+ #
185
+ # Note that dX is a differential, so the gradient contribution comes from the backward sensitivity
186
+ # Tr(f(U_grad, D_grad, A, U, D)^T dX) = Tr(g(U_grad, A, U, D)^T dA) for some functions f and g,
187
+ # and we need to compute g(U_grad, A, U, D)
188
+ #
189
+ # The naive implementation is based on the paper
190
+ # Hu, Qingxi, and Daizhan Cheng.
191
+ # "The polynomial solution to the Sylvester matrix equation."
192
+ # Applied mathematics letters 19.9 (2006): 859-864.
193
+ #
194
+ # We can modify the computation of `p_res` from above in a more efficient way
195
+ # p_res = U_grad * (chr_poly_D[1] * D.pow(0) + ... + chr_poly_D[k] * D.pow(k)).unsqueeze(-2)
196
+ # + A U_grad * (chr_poly_D[2] * D.pow(0) + ... + chr_poly_D[k] * D.pow(k - 1)).unsqueeze(-2)
197
+ # + ...
198
+ # + A.matrix_power(k - 1) U_grad * chr_poly_D[k]
199
+ # Note that this saves us from redundant matrix products with A (elimination of matrix_power)
200
+ U_grad_projected = U_grad
201
+ series_acc = U_grad_projected.new_zeros(U_grad_projected.shape)
202
+ for k in range(1, chr_poly_D.size(-1)):
203
+ poly_D = _vector_polynomial_value(chr_poly_D[..., k:], D)
204
+ series_acc += U_grad_projected * poly_D.unsqueeze(-2)
205
+ U_grad_projected = A.matmul(U_grad_projected)
206
+
207
+ # compute chr_poly_D(A) which essentially is:
208
+ #
209
+ # chr_poly_D_at_A = A.new_zeros(A.shape)
210
+ # for k in range(chr_poly_D.size(-1)):
211
+ # chr_poly_D_at_A += chr_poly_D[k] * A.matrix_power(k)
212
+ #
213
+ # Note, however, for better performance we use the Horner's rule
214
+ chr_poly_D_at_A = _matrix_polynomial_value(chr_poly_D, A)
215
+
216
+ # compute the action of `chr_poly_D_at_A` restricted to U_ortho_t
217
+ chr_poly_D_at_A_to_U_ortho = torch.matmul(
218
+ U_ortho_t, torch.matmul(chr_poly_D_at_A, U_ortho)
219
+ )
220
+ # we need to invert 'chr_poly_D_at_A_to_U_ortho`, for that we compute its
221
+ # Cholesky decomposition and then use `torch.cholesky_solve` for better stability.
222
+ # Cholesky decomposition requires the input to be positive-definite.
223
+ # Note that `chr_poly_D_at_A_to_U_ortho` is positive-definite if
224
+ # 1. `largest` == False, or
225
+ # 2. `largest` == True and `k` is even
226
+ # under the assumption that `A` has distinct eigenvalues.
227
+ #
228
+ # check if `chr_poly_D_at_A_to_U_ortho` is positive-definite or negative-definite
229
+ chr_poly_D_at_A_to_U_ortho_sign = -1 if (largest and (k % 2 == 1)) else +1
230
+ chr_poly_D_at_A_to_U_ortho_L = torch.linalg.cholesky(
231
+ chr_poly_D_at_A_to_U_ortho_sign * chr_poly_D_at_A_to_U_ortho
232
+ )
233
+
234
+ # compute the gradient part in span(U)
235
+ res = _symeig_backward_complete_eigenspace(D_grad, U_grad, A, D, U)
236
+
237
+ # incorporate the Sylvester equation solution into the full gradient
238
+ # it resides in span(U_ortho)
239
+ res -= U_ortho.matmul(
240
+ chr_poly_D_at_A_to_U_ortho_sign
241
+ * torch.cholesky_solve(
242
+ U_ortho_t.matmul(series_acc), chr_poly_D_at_A_to_U_ortho_L
243
+ )
244
+ ).matmul(Ut)
245
+
246
+ return res
247
+
248
+
249
+ def _symeig_backward(D_grad, U_grad, A, D, U, largest):
250
+ # if `U` is square, then the columns of `U` form a complete eigenspace
251
+ if U.size(-1) == U.size(-2):
252
+ return _symeig_backward_complete_eigenspace(D_grad, U_grad, A, D, U)
253
+ else:
254
+ return _symeig_backward_partial_eigenspace(D_grad, U_grad, A, D, U, largest)
255
+
256
+
257
+ class LOBPCGAutogradFunction(torch.autograd.Function):
258
+ @staticmethod
259
+ def forward( # type: ignore[override]
260
+ ctx,
261
+ A: Tensor,
262
+ k: Optional[int] = None,
263
+ B: Optional[Tensor] = None,
264
+ X: Optional[Tensor] = None,
265
+ n: Optional[int] = None,
266
+ iK: Optional[Tensor] = None,
267
+ niter: Optional[int] = None,
268
+ tol: Optional[float] = None,
269
+ largest: Optional[bool] = None,
270
+ method: Optional[str] = None,
271
+ tracker: None = None,
272
+ ortho_iparams: Optional[Dict[str, int]] = None,
273
+ ortho_fparams: Optional[Dict[str, float]] = None,
274
+ ortho_bparams: Optional[Dict[str, bool]] = None,
275
+ ) -> Tuple[Tensor, Tensor]:
276
+ # makes sure that input is contiguous for efficiency.
277
+ # Note: autograd does not support dense gradients for sparse input yet.
278
+ A = A.contiguous() if (not A.is_sparse) else A
279
+ if B is not None:
280
+ B = B.contiguous() if (not B.is_sparse) else B
281
+
282
+ D, U = _lobpcg(
283
+ A,
284
+ k,
285
+ B,
286
+ X,
287
+ n,
288
+ iK,
289
+ niter,
290
+ tol,
291
+ largest,
292
+ method,
293
+ tracker,
294
+ ortho_iparams,
295
+ ortho_fparams,
296
+ ortho_bparams,
297
+ )
298
+
299
+ ctx.save_for_backward(A, B, D, U)
300
+ ctx.largest = largest
301
+
302
+ return D, U
303
+
304
+ @staticmethod
305
+ def backward(ctx, D_grad, U_grad):
306
+ A_grad = B_grad = None
307
+ grads = [None] * 14
308
+
309
+ A, B, D, U = ctx.saved_tensors
310
+ largest = ctx.largest
311
+
312
+ # lobpcg.backward has some limitations. Checks for unsupported input
313
+ if A.is_sparse or (B is not None and B.is_sparse and ctx.needs_input_grad[2]):
314
+ raise ValueError(
315
+ "lobpcg.backward does not support sparse input yet."
316
+ "Note that lobpcg.forward does though."
317
+ )
318
+ if (
319
+ A.dtype in (torch.complex64, torch.complex128)
320
+ or B is not None
321
+ and B.dtype in (torch.complex64, torch.complex128)
322
+ ):
323
+ raise ValueError(
324
+ "lobpcg.backward does not support complex input yet."
325
+ "Note that lobpcg.forward does though."
326
+ )
327
+ if B is not None:
328
+ raise ValueError(
329
+ "lobpcg.backward does not support backward with B != I yet."
330
+ )
331
+
332
+ if largest is None:
333
+ largest = True
334
+
335
+ # symeig backward
336
+ if B is None:
337
+ A_grad = _symeig_backward(D_grad, U_grad, A, D, U, largest)
338
+
339
+ # A has index 0
340
+ grads[0] = A_grad
341
+ # B has index 2
342
+ grads[2] = B_grad
343
+ return tuple(grads)
344
+
345
+
346
+ def lobpcg(
347
+ A: Tensor,
348
+ k: Optional[int] = None,
349
+ B: Optional[Tensor] = None,
350
+ X: Optional[Tensor] = None,
351
+ n: Optional[int] = None,
352
+ iK: Optional[Tensor] = None,
353
+ niter: Optional[int] = None,
354
+ tol: Optional[float] = None,
355
+ largest: Optional[bool] = None,
356
+ method: Optional[str] = None,
357
+ tracker: None = None,
358
+ ortho_iparams: Optional[Dict[str, int]] = None,
359
+ ortho_fparams: Optional[Dict[str, float]] = None,
360
+ ortho_bparams: Optional[Dict[str, bool]] = None,
361
+ ) -> Tuple[Tensor, Tensor]:
362
+ """Find the k largest (or smallest) eigenvalues and the corresponding
363
+ eigenvectors of a symmetric positive definite generalized
364
+ eigenvalue problem using matrix-free LOBPCG methods.
365
+
366
+ This function is a front-end to the following LOBPCG algorithms
367
+ selectable via `method` argument:
368
+
369
+ `method="basic"` - the LOBPCG method introduced by Andrew
370
+ Knyazev, see [Knyazev2001]. A less robust method that may fail when
371
+ Cholesky is applied to singular input.
372
+
373
+ `method="ortho"` - the LOBPCG method with orthogonal basis
374
+ selection [StathopoulosEtal2002]. A robust method.
375
+
376
+ Supported inputs are dense, sparse, and batches of dense matrices.
377
+
378
+ .. note:: In general, the basic method spends the least time per
379
+ iteration. However, the robust method converges much faster and
380
+ is more stable. So, the use of the basic method is generally
381
+ not recommended, but there exist cases where it may be
382
+ preferred.
383
+
384
+ .. warning:: The backward method does not support sparse and complex inputs.
385
+ It works only when `B` is not provided (i.e. `B == None`).
386
+ We are actively working on extensions, and the details of
387
+ the algorithms are going to be published promptly.
388
+
389
+ .. warning:: While it is assumed that `A` is symmetric, `A.grad` is not.
390
+ To make sure that `A.grad` is symmetric, so that `A - t * A.grad` is symmetric
391
+ in first-order optimization routines, prior to running `lobpcg`
392
+ we do the following symmetrization map: `A -> (A + A.t()) / 2`.
393
+ The map is performed only when the `A` requires gradients.
394
+
395
+ Args:
396
+
397
+ A (Tensor): the input tensor of size :math:`(*, m, m)`
398
+
399
+ B (Tensor, optional): the input tensor of size :math:`(*, m,
400
+ m)`. When not specified, `B` is interpreted as
401
+ identity matrix.
402
+
403
+ X (tensor, optional): the input tensor of size :math:`(*, m, n)`
404
+ where `k <= n <= m`. When specified, it is used as
405
+ initial approximation of eigenvectors. X must be a
406
+ dense tensor.
407
+
408
+ iK (tensor, optional): the input tensor of size :math:`(*, m,
409
+ m)`. When specified, it will be used as preconditioner.
410
+
411
+ k (integer, optional): the number of requested
412
+ eigenpairs. Default is the number of :math:`X`
413
+ columns (when specified) or `1`.
414
+
415
+ n (integer, optional): if :math:`X` is not specified then `n`
416
+ specifies the size of the generated random
417
+ approximation of eigenvectors. Default value for `n`
418
+ is `k`. If :math:`X` is specified, the value of `n`
419
+ (when specified) must be the number of :math:`X`
420
+ columns.
421
+
422
+ tol (float, optional): residual tolerance for stopping
423
+ criterion. Default is `feps ** 0.5` where `feps` is
424
+ the machine epsilon of the data type of the given
425
+ input tensor `A`.
426
+
427
+ largest (bool, optional): when True, solve the eigenproblem for
428
+ the largest eigenvalues. Otherwise, solve the
429
+ eigenproblem for smallest eigenvalues. Default is
430
+ `True`.
431
+
432
+ method (str, optional): select LOBPCG method. See the
433
+ description of the function above. Default is
434
+ "ortho".
435
+
436
+ niter (int, optional): maximum number of iterations. When
437
+ reached, the iteration process is hard-stopped and
438
+ the current approximation of eigenpairs is returned.
439
+ To iterate with no upper bound until the convergence
440
+ criterion is met, use `-1`.
441
+
442
+ tracker (callable, optional) : a function for tracing the
443
+ iteration process. When specified, it is called at
444
+ each iteration step with LOBPCG instance as an
445
+ argument. The LOBPCG instance holds the full state of
446
+ the iteration process in the following attributes:
447
+
448
+ `iparams`, `fparams`, `bparams` - dictionaries of
449
+ integer, float, and boolean valued input
450
+ parameters, respectively
451
+
452
+ `ivars`, `fvars`, `bvars`, `tvars` - dictionaries
453
+ of integer, float, boolean, and Tensor valued
454
+ iteration variables, respectively.
455
+
456
+ `A`, `B`, `iK` - input Tensor arguments.
457
+
458
+ `E`, `X`, `S`, `R` - iteration Tensor variables.
459
+
460
+ For instance:
461
+
462
+ `ivars["istep"]` - the current iteration step
463
+ `X` - the current approximation of eigenvectors
464
+ `E` - the current approximation of eigenvalues
465
+ `R` - the current residual
466
+ `ivars["converged_count"]` - the current number of converged eigenpairs
467
+ `tvars["rerr"]` - the current state of convergence criteria
468
+
469
+ Note that when `tracker` stores Tensor objects from
470
+ the LOBPCG instance, it must make copies of these.
471
+
472
+ If `tracker` sets `bvars["force_stop"] = True`, the
473
+ iteration process will be hard-stopped.
474
+
475
+ ortho_iparams, ortho_fparams, ortho_bparams (dict, optional):
476
+ various parameters to LOBPCG algorithm when using
477
+ `method="ortho"`.
478
+
479
+ Returns:
480
+
481
+ E (Tensor): tensor of eigenvalues of size :math:`(*, k)`
482
+
483
+ X (Tensor): tensor of eigenvectors of size :math:`(*, m, k)`
484
+
485
+ References:
486
+
487
+ [Knyazev2001] Andrew V. Knyazev. (2001) Toward the Optimal
488
+ Preconditioned Eigensolver: Locally Optimal Block Preconditioned
489
+ Conjugate Gradient Method. SIAM J. Sci. Comput., 23(2),
490
+ 517-541. (25 pages)
491
+ https://epubs.siam.org/doi/abs/10.1137/S1064827500366124
492
+
493
+ [StathopoulosEtal2002] Andreas Stathopoulos and Kesheng
494
+ Wu. (2002) A Block Orthogonalization Procedure with Constant
495
+ Synchronization Requirements. SIAM J. Sci. Comput., 23(6),
496
+ 2165-2182. (18 pages)
497
+ https://epubs.siam.org/doi/10.1137/S1064827500370883
498
+
499
+ [DuerschEtal2018] Jed A. Duersch, Meiyue Shao, Chao Yang, Ming
500
+ Gu. (2018) A Robust and Efficient Implementation of LOBPCG.
501
+ SIAM J. Sci. Comput., 40(5), C655-C676. (22 pages)
502
+ https://epubs.siam.org/doi/abs/10.1137/17M1129830
503
+
504
+ """
505
+
506
+ if not torch.jit.is_scripting():
507
+ tensor_ops = (A, B, X, iK)
508
+ if not set(map(type, tensor_ops)).issubset(
509
+ (torch.Tensor, type(None))
510
+ ) and has_torch_function(tensor_ops):
511
+ return handle_torch_function(
512
+ lobpcg,
513
+ tensor_ops,
514
+ A,
515
+ k=k,
516
+ B=B,
517
+ X=X,
518
+ n=n,
519
+ iK=iK,
520
+ niter=niter,
521
+ tol=tol,
522
+ largest=largest,
523
+ method=method,
524
+ tracker=tracker,
525
+ ortho_iparams=ortho_iparams,
526
+ ortho_fparams=ortho_fparams,
527
+ ortho_bparams=ortho_bparams,
528
+ )
529
+
530
+ if not torch._jit_internal.is_scripting():
531
+ if A.requires_grad or (B is not None and B.requires_grad):
532
+ # While it is expected that `A` is symmetric,
533
+ # the `A_grad` might be not. Therefore we perform the trick below,
534
+ # so that `A_grad` becomes symmetric.
535
+ # The symmetrization is important for first-order optimization methods,
536
+ # so that (A - alpha * A_grad) is still a symmetric matrix.
537
+ # Same holds for `B`.
538
+ A_sym = (A + A.mT) / 2
539
+ B_sym = (B + B.mT) / 2 if (B is not None) else None
540
+
541
+ return LOBPCGAutogradFunction.apply(
542
+ A_sym,
543
+ k,
544
+ B_sym,
545
+ X,
546
+ n,
547
+ iK,
548
+ niter,
549
+ tol,
550
+ largest,
551
+ method,
552
+ tracker,
553
+ ortho_iparams,
554
+ ortho_fparams,
555
+ ortho_bparams,
556
+ )
557
+ else:
558
+ if A.requires_grad or (B is not None and B.requires_grad):
559
+ raise RuntimeError(
560
+ "Script and require grads is not supported atm."
561
+ "If you just want to do the forward, use .detach()"
562
+ "on A and B before calling into lobpcg"
563
+ )
564
+
565
+ return _lobpcg(
566
+ A,
567
+ k,
568
+ B,
569
+ X,
570
+ n,
571
+ iK,
572
+ niter,
573
+ tol,
574
+ largest,
575
+ method,
576
+ tracker,
577
+ ortho_iparams,
578
+ ortho_fparams,
579
+ ortho_bparams,
580
+ )
581
+
582
+
583
+ def _lobpcg(
584
+ A: Tensor,
585
+ k: Optional[int] = None,
586
+ B: Optional[Tensor] = None,
587
+ X: Optional[Tensor] = None,
588
+ n: Optional[int] = None,
589
+ iK: Optional[Tensor] = None,
590
+ niter: Optional[int] = None,
591
+ tol: Optional[float] = None,
592
+ largest: Optional[bool] = None,
593
+ method: Optional[str] = None,
594
+ tracker: None = None,
595
+ ortho_iparams: Optional[Dict[str, int]] = None,
596
+ ortho_fparams: Optional[Dict[str, float]] = None,
597
+ ortho_bparams: Optional[Dict[str, bool]] = None,
598
+ ) -> Tuple[Tensor, Tensor]:
599
+ # A must be square:
600
+ assert A.shape[-2] == A.shape[-1], A.shape
601
+ if B is not None:
602
+ # A and B must have the same shapes:
603
+ assert A.shape == B.shape, (A.shape, B.shape)
604
+
605
+ dtype = _utils.get_floating_dtype(A)
606
+ device = A.device
607
+ if tol is None:
608
+ feps = {torch.float32: 1.2e-07, torch.float64: 2.23e-16}[dtype]
609
+ tol = feps**0.5
610
+
611
+ m = A.shape[-1]
612
+ k = (1 if X is None else X.shape[-1]) if k is None else k
613
+ n = (k if n is None else n) if X is None else X.shape[-1]
614
+
615
+ if m < 3 * n:
616
+ raise ValueError(
617
+ f"LPBPCG algorithm is not applicable when the number of A rows (={m})"
618
+ f" is smaller than 3 x the number of requested eigenpairs (={n})"
619
+ )
620
+
621
+ method = "ortho" if method is None else method
622
+
623
+ iparams = {
624
+ "m": m,
625
+ "n": n,
626
+ "k": k,
627
+ "niter": 1000 if niter is None else niter,
628
+ }
629
+
630
+ fparams = {
631
+ "tol": tol,
632
+ }
633
+
634
+ bparams = {"largest": True if largest is None else largest}
635
+
636
+ if method == "ortho":
637
+ if ortho_iparams is not None:
638
+ iparams.update(ortho_iparams)
639
+ if ortho_fparams is not None:
640
+ fparams.update(ortho_fparams)
641
+ if ortho_bparams is not None:
642
+ bparams.update(ortho_bparams)
643
+ iparams["ortho_i_max"] = iparams.get("ortho_i_max", 3)
644
+ iparams["ortho_j_max"] = iparams.get("ortho_j_max", 3)
645
+ fparams["ortho_tol"] = fparams.get("ortho_tol", tol)
646
+ fparams["ortho_tol_drop"] = fparams.get("ortho_tol_drop", tol)
647
+ fparams["ortho_tol_replace"] = fparams.get("ortho_tol_replace", tol)
648
+ bparams["ortho_use_drop"] = bparams.get("ortho_use_drop", False)
649
+
650
+ if not torch.jit.is_scripting():
651
+ LOBPCG.call_tracker = LOBPCG_call_tracker # type: ignore[method-assign]
652
+
653
+ if len(A.shape) > 2:
654
+ N = int(torch.prod(torch.tensor(A.shape[:-2])))
655
+ bA = A.reshape((N,) + A.shape[-2:])
656
+ bB = B.reshape((N,) + A.shape[-2:]) if B is not None else None
657
+ bX = X.reshape((N,) + X.shape[-2:]) if X is not None else None
658
+ bE = torch.empty((N, k), dtype=dtype, device=device)
659
+ bXret = torch.empty((N, m, k), dtype=dtype, device=device)
660
+
661
+ for i in range(N):
662
+ A_ = bA[i]
663
+ B_ = bB[i] if bB is not None else None
664
+ X_ = (
665
+ torch.randn((m, n), dtype=dtype, device=device) if bX is None else bX[i]
666
+ )
667
+ assert len(X_.shape) == 2 and X_.shape == (m, n), (X_.shape, (m, n))
668
+ iparams["batch_index"] = i
669
+ worker = LOBPCG(A_, B_, X_, iK, iparams, fparams, bparams, method, tracker)
670
+ worker.run()
671
+ bE[i] = worker.E[:k]
672
+ bXret[i] = worker.X[:, :k]
673
+
674
+ if not torch.jit.is_scripting():
675
+ LOBPCG.call_tracker = LOBPCG_call_tracker_orig # type: ignore[method-assign]
676
+
677
+ return bE.reshape(A.shape[:-2] + (k,)), bXret.reshape(A.shape[:-2] + (m, k))
678
+
679
+ X = torch.randn((m, n), dtype=dtype, device=device) if X is None else X
680
+ assert len(X.shape) == 2 and X.shape == (m, n), (X.shape, (m, n))
681
+
682
+ worker = LOBPCG(A, B, X, iK, iparams, fparams, bparams, method, tracker)
683
+
684
+ worker.run()
685
+
686
+ if not torch.jit.is_scripting():
687
+ LOBPCG.call_tracker = LOBPCG_call_tracker_orig # type: ignore[method-assign]
688
+
689
+ return worker.E[:k], worker.X[:, :k]
690
+
691
+
692
+ class LOBPCG:
693
+ """Worker class of LOBPCG methods."""
694
+
695
+ def __init__(
696
+ self,
697
+ A: Optional[Tensor],
698
+ B: Optional[Tensor],
699
+ X: Tensor,
700
+ iK: Optional[Tensor],
701
+ iparams: Dict[str, int],
702
+ fparams: Dict[str, float],
703
+ bparams: Dict[str, bool],
704
+ method: str,
705
+ tracker: None,
706
+ ) -> None:
707
+ # constant parameters
708
+ self.A = A
709
+ self.B = B
710
+ self.iK = iK
711
+ self.iparams = iparams
712
+ self.fparams = fparams
713
+ self.bparams = bparams
714
+ self.method = method
715
+ self.tracker = tracker
716
+ m = iparams["m"]
717
+ n = iparams["n"]
718
+
719
+ # variable parameters
720
+ self.X = X
721
+ self.E = torch.zeros((n,), dtype=X.dtype, device=X.device)
722
+ self.R = torch.zeros((m, n), dtype=X.dtype, device=X.device)
723
+ self.S = torch.zeros((m, 3 * n), dtype=X.dtype, device=X.device)
724
+ self.tvars: Dict[str, Tensor] = {}
725
+ self.ivars: Dict[str, int] = {"istep": 0}
726
+ self.fvars: Dict[str, float] = {"_": 0.0}
727
+ self.bvars: Dict[str, bool] = {"_": False}
728
+
729
+ def __str__(self):
730
+ lines = ["LOPBCG:"]
731
+ lines += [f" iparams={self.iparams}"]
732
+ lines += [f" fparams={self.fparams}"]
733
+ lines += [f" bparams={self.bparams}"]
734
+ lines += [f" ivars={self.ivars}"]
735
+ lines += [f" fvars={self.fvars}"]
736
+ lines += [f" bvars={self.bvars}"]
737
+ lines += [f" tvars={self.tvars}"]
738
+ lines += [f" A={self.A}"]
739
+ lines += [f" B={self.B}"]
740
+ lines += [f" iK={self.iK}"]
741
+ lines += [f" X={self.X}"]
742
+ lines += [f" E={self.E}"]
743
+ r = ""
744
+ for line in lines:
745
+ r += line + "\n"
746
+ return r
747
+
748
+ def update(self):
749
+ """Set and update iteration variables."""
750
+ if self.ivars["istep"] == 0:
751
+ X_norm = float(torch.norm(self.X))
752
+ iX_norm = X_norm**-1
753
+ A_norm = float(torch.norm(_utils.matmul(self.A, self.X))) * iX_norm
754
+ B_norm = float(torch.norm(_utils.matmul(self.B, self.X))) * iX_norm
755
+ self.fvars["X_norm"] = X_norm
756
+ self.fvars["A_norm"] = A_norm
757
+ self.fvars["B_norm"] = B_norm
758
+ self.ivars["iterations_left"] = self.iparams["niter"]
759
+ self.ivars["converged_count"] = 0
760
+ self.ivars["converged_end"] = 0
761
+
762
+ if self.method == "ortho":
763
+ self._update_ortho()
764
+ else:
765
+ self._update_basic()
766
+
767
+ self.ivars["iterations_left"] = self.ivars["iterations_left"] - 1
768
+ self.ivars["istep"] = self.ivars["istep"] + 1
769
+
770
+ def update_residual(self):
771
+ """Update residual R from A, B, X, E."""
772
+ mm = _utils.matmul
773
+ self.R = mm(self.A, self.X) - mm(self.B, self.X) * self.E
774
+
775
+ def update_converged_count(self):
776
+ """Determine the number of converged eigenpairs using backward stable
777
+ convergence criterion, see discussion in Sec 4.3 of [DuerschEtal2018].
778
+
779
+ Users may redefine this method for custom convergence criteria.
780
+ """
781
+ # (...) -> int
782
+ prev_count = self.ivars["converged_count"]
783
+ tol = self.fparams["tol"]
784
+ A_norm = self.fvars["A_norm"]
785
+ B_norm = self.fvars["B_norm"]
786
+ E, X, R = self.E, self.X, self.R
787
+ rerr = (
788
+ torch.norm(R, 2, (0,))
789
+ * (torch.norm(X, 2, (0,)) * (A_norm + E[: X.shape[-1]] * B_norm)) ** -1
790
+ )
791
+ converged = rerr < tol
792
+ count = 0
793
+ for b in converged:
794
+ if not b:
795
+ # ignore convergence of following pairs to ensure
796
+ # strict ordering of eigenpairs
797
+ break
798
+ count += 1
799
+ assert (
800
+ count >= prev_count
801
+ ), f"the number of converged eigenpairs (was {prev_count}, got {count}) cannot decrease"
802
+ self.ivars["converged_count"] = count
803
+ self.tvars["rerr"] = rerr
804
+ return count
805
+
806
+ def stop_iteration(self):
807
+ """Return True to stop iterations.
808
+
809
+ Note that tracker (if defined) can force-stop iterations by
810
+ setting ``worker.bvars['force_stop'] = True``.
811
+ """
812
+ return (
813
+ self.bvars.get("force_stop", False)
814
+ or self.ivars["iterations_left"] == 0
815
+ or self.ivars["converged_count"] >= self.iparams["k"]
816
+ )
817
+
818
+ def run(self):
819
+ """Run LOBPCG iterations.
820
+
821
+ Use this method as a template for implementing LOBPCG
822
+ iteration scheme with custom tracker that is compatible with
823
+ TorchScript.
824
+ """
825
+ self.update()
826
+
827
+ if not torch.jit.is_scripting() and self.tracker is not None:
828
+ self.call_tracker()
829
+
830
+ while not self.stop_iteration():
831
+ self.update()
832
+
833
+ if not torch.jit.is_scripting() and self.tracker is not None:
834
+ self.call_tracker()
835
+
836
+ @torch.jit.unused
837
+ def call_tracker(self):
838
+ """Interface for tracking iteration process in Python mode.
839
+
840
+ Tracking the iteration process is disabled in TorchScript
841
+ mode. In fact, one should specify tracker=None when JIT
842
+ compiling functions using lobpcg.
843
+ """
844
+ # do nothing when in TorchScript mode
845
+ pass
846
+
847
+ # Internal methods
848
+
849
+ def _update_basic(self):
850
+ """
851
+ Update or initialize iteration variables when `method == "basic"`.
852
+ """
853
+ mm = torch.matmul
854
+ ns = self.ivars["converged_end"]
855
+ nc = self.ivars["converged_count"]
856
+ n = self.iparams["n"]
857
+ largest = self.bparams["largest"]
858
+
859
+ if self.ivars["istep"] == 0:
860
+ Ri = self._get_rayleigh_ritz_transform(self.X)
861
+ M = _utils.qform(_utils.qform(self.A, self.X), Ri)
862
+ E, Z = _utils.symeig(M, largest)
863
+ self.X[:] = mm(self.X, mm(Ri, Z))
864
+ self.E[:] = E
865
+ np = 0
866
+ self.update_residual()
867
+ nc = self.update_converged_count()
868
+ self.S[..., :n] = self.X
869
+
870
+ W = _utils.matmul(self.iK, self.R)
871
+ self.ivars["converged_end"] = ns = n + np + W.shape[-1]
872
+ self.S[:, n + np : ns] = W
873
+ else:
874
+ S_ = self.S[:, nc:ns]
875
+ Ri = self._get_rayleigh_ritz_transform(S_)
876
+ M = _utils.qform(_utils.qform(self.A, S_), Ri)
877
+ E_, Z = _utils.symeig(M, largest)
878
+ self.X[:, nc:] = mm(S_, mm(Ri, Z[:, : n - nc]))
879
+ self.E[nc:] = E_[: n - nc]
880
+ P = mm(S_, mm(Ri, Z[:, n : 2 * n - nc]))
881
+ np = P.shape[-1]
882
+
883
+ self.update_residual()
884
+ nc = self.update_converged_count()
885
+ self.S[..., :n] = self.X
886
+ self.S[:, n : n + np] = P
887
+ W = _utils.matmul(self.iK, self.R[:, nc:])
888
+
889
+ self.ivars["converged_end"] = ns = n + np + W.shape[-1]
890
+ self.S[:, n + np : ns] = W
891
+
892
+ def _update_ortho(self):
893
+ """
894
+ Update or initialize iteration variables when `method == "ortho"`.
895
+ """
896
+ mm = torch.matmul
897
+ ns = self.ivars["converged_end"]
898
+ nc = self.ivars["converged_count"]
899
+ n = self.iparams["n"]
900
+ largest = self.bparams["largest"]
901
+
902
+ if self.ivars["istep"] == 0:
903
+ Ri = self._get_rayleigh_ritz_transform(self.X)
904
+ M = _utils.qform(_utils.qform(self.A, self.X), Ri)
905
+ E, Z = _utils.symeig(M, largest)
906
+ self.X = mm(self.X, mm(Ri, Z))
907
+ self.update_residual()
908
+ np = 0
909
+ nc = self.update_converged_count()
910
+ self.S[:, :n] = self.X
911
+ W = self._get_ortho(self.R, self.X)
912
+ ns = self.ivars["converged_end"] = n + np + W.shape[-1]
913
+ self.S[:, n + np : ns] = W
914
+
915
+ else:
916
+ S_ = self.S[:, nc:ns]
917
+ # Rayleigh-Ritz procedure
918
+ E_, Z = _utils.symeig(_utils.qform(self.A, S_), largest)
919
+
920
+ # Update E, X, P
921
+ self.X[:, nc:] = mm(S_, Z[:, : n - nc])
922
+ self.E[nc:] = E_[: n - nc]
923
+ P = mm(
924
+ S_,
925
+ mm(
926
+ Z[:, n - nc :],
927
+ _utils.basis(_utils.transpose(Z[: n - nc, n - nc :])),
928
+ ),
929
+ )
930
+ np = P.shape[-1]
931
+
932
+ # check convergence
933
+ self.update_residual()
934
+ nc = self.update_converged_count()
935
+
936
+ # update S
937
+ self.S[:, :n] = self.X
938
+ self.S[:, n : n + np] = P
939
+ W = self._get_ortho(self.R[:, nc:], self.S[:, : n + np])
940
+ ns = self.ivars["converged_end"] = n + np + W.shape[-1]
941
+ self.S[:, n + np : ns] = W
942
+
943
+ def _get_rayleigh_ritz_transform(self, S):
944
+ """Return a transformation matrix that is used in Rayleigh-Ritz
945
+ procedure for reducing a general eigenvalue problem :math:`(S^TAS)
946
+ C = (S^TBS) C E` to a standard eigenvalue problem :math:`(Ri^T
947
+ S^TAS Ri) Z = Z E` where `C = Ri Z`.
948
+
949
+ .. note:: In the original Rayleigh-Ritz procedure in
950
+ [DuerschEtal2018], the problem is formulated as follows::
951
+
952
+ SAS = S^T A S
953
+ SBS = S^T B S
954
+ D = (<diagonal matrix of SBS>) ** -1/2
955
+ R^T R = Cholesky(D SBS D)
956
+ Ri = D R^-1
957
+ solve symeig problem Ri^T SAS Ri Z = Theta Z
958
+ C = Ri Z
959
+
960
+ To reduce the number of matrix products (denoted by empty
961
+ space between matrices), here we introduce element-wise
962
+ products (denoted by symbol `*`) so that the Rayleigh-Ritz
963
+ procedure becomes::
964
+
965
+ SAS = S^T A S
966
+ SBS = S^T B S
967
+ d = (<diagonal of SBS>) ** -1/2 # this is 1-d column vector
968
+ dd = d d^T # this is 2-d matrix
969
+ R^T R = Cholesky(dd * SBS)
970
+ Ri = R^-1 * d # broadcasting
971
+ solve symeig problem Ri^T SAS Ri Z = Theta Z
972
+ C = Ri Z
973
+
974
+ where `dd` is 2-d matrix that replaces matrix products `D M
975
+ D` with one element-wise product `M * dd`; and `d` replaces
976
+ matrix product `D M` with element-wise product `M *
977
+ d`. Also, creating the diagonal matrix `D` is avoided.
978
+
979
+ Args:
980
+ S (Tensor): the matrix basis for the search subspace, size is
981
+ :math:`(m, n)`.
982
+
983
+ Returns:
984
+ Ri (tensor): upper-triangular transformation matrix of size
985
+ :math:`(n, n)`.
986
+
987
+ """
988
+ B = self.B
989
+ mm = torch.matmul
990
+ SBS = _utils.qform(B, S)
991
+ d_row = SBS.diagonal(0, -2, -1) ** -0.5
992
+ d_col = d_row.reshape(d_row.shape[0], 1)
993
+ # TODO use torch.linalg.cholesky_solve once it is implemented
994
+ R = torch.linalg.cholesky((SBS * d_row) * d_col, upper=True)
995
+ return torch.linalg.solve_triangular(
996
+ R, d_row.diag_embed(), upper=True, left=False
997
+ )
998
+
999
+ def _get_svqb(
1000
+ self, U: Tensor, drop: bool, tau: float # Tensor # bool # float
1001
+ ) -> Tensor:
1002
+ """Return B-orthonormal U.
1003
+
1004
+ .. note:: When `drop` is `False` then `svqb` is based on the
1005
+ Algorithm 4 from [DuerschPhD2015] that is a slight
1006
+ modification of the corresponding algorithm
1007
+ introduced in [StathopolousWu2002].
1008
+
1009
+ Args:
1010
+
1011
+ U (Tensor) : initial approximation, size is (m, n)
1012
+ drop (bool) : when True, drop columns whose
1013
+ contribution to `span([U])` is small.
1014
+ tau (float) : positive tolerance
1015
+
1016
+ Returns:
1017
+
1018
+ U (Tensor) : B-orthonormal columns (:math:`U^T B U = I`), size
1019
+ is (m, n1), where `n1 = n` if `drop` is `False`,
1020
+ otherwise `n1 <= n`.
1021
+
1022
+ """
1023
+ if torch.numel(U) == 0:
1024
+ return U
1025
+ UBU = _utils.qform(self.B, U)
1026
+ d = UBU.diagonal(0, -2, -1)
1027
+
1028
+ # Detect and drop exact zero columns from U. While the test
1029
+ # `abs(d) == 0` is unlikely to be True for random data, it is
1030
+ # possible to construct input data to lobpcg where it will be
1031
+ # True leading to a failure (notice the `d ** -0.5` operation
1032
+ # in the original algorithm). To prevent the failure, we drop
1033
+ # the exact zero columns here and then continue with the
1034
+ # original algorithm below.
1035
+ nz = torch.where(abs(d) != 0.0)
1036
+ assert len(nz) == 1, nz
1037
+ if len(nz[0]) < len(d):
1038
+ U = U[:, nz[0]]
1039
+ if torch.numel(U) == 0:
1040
+ return U
1041
+ UBU = _utils.qform(self.B, U)
1042
+ d = UBU.diagonal(0, -2, -1)
1043
+ nz = torch.where(abs(d) != 0.0)
1044
+ assert len(nz[0]) == len(d)
1045
+
1046
+ # The original algorithm 4 from [DuerschPhD2015].
1047
+ d_col = (d**-0.5).reshape(d.shape[0], 1)
1048
+ DUBUD = (UBU * d_col) * _utils.transpose(d_col)
1049
+ E, Z = _utils.symeig(DUBUD)
1050
+ t = tau * abs(E).max()
1051
+ if drop:
1052
+ keep = torch.where(E > t)
1053
+ assert len(keep) == 1, keep
1054
+ E = E[keep[0]]
1055
+ Z = Z[:, keep[0]]
1056
+ d_col = d_col[keep[0]]
1057
+ else:
1058
+ E[(torch.where(E < t))[0]] = t
1059
+
1060
+ return torch.matmul(U * _utils.transpose(d_col), Z * E**-0.5)
1061
+
1062
+ def _get_ortho(self, U, V):
1063
+ """Return B-orthonormal U with columns are B-orthogonal to V.
1064
+
1065
+ .. note:: When `bparams["ortho_use_drop"] == False` then
1066
+ `_get_ortho` is based on the Algorithm 3 from
1067
+ [DuerschPhD2015] that is a slight modification of
1068
+ the corresponding algorithm introduced in
1069
+ [StathopolousWu2002]. Otherwise, the method
1070
+ implements Algorithm 6 from [DuerschPhD2015]
1071
+
1072
+ .. note:: If all U columns are B-collinear to V then the
1073
+ returned tensor U will be empty.
1074
+
1075
+ Args:
1076
+
1077
+ U (Tensor) : initial approximation, size is (m, n)
1078
+ V (Tensor) : B-orthogonal external basis, size is (m, k)
1079
+
1080
+ Returns:
1081
+
1082
+ U (Tensor) : B-orthonormal columns (:math:`U^T B U = I`)
1083
+ such that :math:`V^T B U=0`, size is (m, n1),
1084
+ where `n1 = n` if `drop` is `False`, otherwise
1085
+ `n1 <= n`.
1086
+ """
1087
+ mm = torch.matmul
1088
+ mm_B = _utils.matmul
1089
+ m = self.iparams["m"]
1090
+ tau_ortho = self.fparams["ortho_tol"]
1091
+ tau_drop = self.fparams["ortho_tol_drop"]
1092
+ tau_replace = self.fparams["ortho_tol_replace"]
1093
+ i_max = self.iparams["ortho_i_max"]
1094
+ j_max = self.iparams["ortho_j_max"]
1095
+ # when use_drop==True, enable dropping U columns that have
1096
+ # small contribution to the `span([U, V])`.
1097
+ use_drop = self.bparams["ortho_use_drop"]
1098
+
1099
+ # clean up variables from the previous call
1100
+ for vkey in list(self.fvars.keys()):
1101
+ if vkey.startswith("ortho_") and vkey.endswith("_rerr"):
1102
+ self.fvars.pop(vkey)
1103
+ self.ivars.pop("ortho_i", 0)
1104
+ self.ivars.pop("ortho_j", 0)
1105
+
1106
+ BV_norm = torch.norm(mm_B(self.B, V))
1107
+ BU = mm_B(self.B, U)
1108
+ VBU = mm(_utils.transpose(V), BU)
1109
+ i = j = 0
1110
+ stats = ""
1111
+ for i in range(i_max):
1112
+ U = U - mm(V, VBU)
1113
+ drop = False
1114
+ tau_svqb = tau_drop
1115
+ for j in range(j_max):
1116
+ if use_drop:
1117
+ U = self._get_svqb(U, drop, tau_svqb)
1118
+ drop = True
1119
+ tau_svqb = tau_replace
1120
+ else:
1121
+ U = self._get_svqb(U, False, tau_replace)
1122
+ if torch.numel(U) == 0:
1123
+ # all initial U columns are B-collinear to V
1124
+ self.ivars["ortho_i"] = i
1125
+ self.ivars["ortho_j"] = j
1126
+ return U
1127
+ BU = mm_B(self.B, U)
1128
+ UBU = mm(_utils.transpose(U), BU)
1129
+ U_norm = torch.norm(U)
1130
+ BU_norm = torch.norm(BU)
1131
+ R = UBU - torch.eye(UBU.shape[-1], device=UBU.device, dtype=UBU.dtype)
1132
+ R_norm = torch.norm(R)
1133
+ # https://github.com/pytorch/pytorch/issues/33810 workaround:
1134
+ rerr = float(R_norm) * float(BU_norm * U_norm) ** -1
1135
+ vkey = f"ortho_UBUmI_rerr[{i}, {j}]"
1136
+ self.fvars[vkey] = rerr
1137
+ if rerr < tau_ortho:
1138
+ break
1139
+ VBU = mm(_utils.transpose(V), BU)
1140
+ VBU_norm = torch.norm(VBU)
1141
+ U_norm = torch.norm(U)
1142
+ rerr = float(VBU_norm) * float(BV_norm * U_norm) ** -1
1143
+ vkey = f"ortho_VBU_rerr[{i}]"
1144
+ self.fvars[vkey] = rerr
1145
+ if rerr < tau_ortho:
1146
+ break
1147
+ if m < U.shape[-1] + V.shape[-1]:
1148
+ # TorchScript needs the class var to be assigned to a local to
1149
+ # do optional type refinement
1150
+ B = self.B
1151
+ assert B is not None
1152
+ raise ValueError(
1153
+ "Overdetermined shape of U:"
1154
+ f" #B-cols(={B.shape[-1]}) >= #U-cols(={U.shape[-1]}) + #V-cols(={V.shape[-1]}) must hold"
1155
+ )
1156
+ self.ivars["ortho_i"] = i
1157
+ self.ivars["ortho_j"] = j
1158
+ return U
1159
+
1160
+
1161
+ # Calling tracker is separated from LOBPCG definitions because
1162
+ # TorchScript does not support user-defined callback arguments:
1163
+ LOBPCG_call_tracker_orig = LOBPCG.call_tracker
1164
+
1165
+
1166
+ def LOBPCG_call_tracker(self):
1167
+ self.tracker(self)
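The public entry point for this file is `torch.lobpcg`. A minimal usage sketch for the ordinary (B = I) symmetric eigenproblem, recovering the two largest eigenpairs of a random positive-definite matrix and checking them against the dense solver:

import torch

torch.manual_seed(0)
m = 50
Q = torch.randn(m, m, dtype=torch.float64)
A = Q @ Q.mT + m * torch.eye(m, dtype=torch.float64)  # symmetric positive definite

# two largest eigenpairs; method="ortho" (the robust variant) is the default
E, X = torch.lobpcg(A, k=2, largest=True)

# eigvalsh returns ascending eigenvalues, so take the last two and reverse them
E_ref = torch.linalg.eigvalsh(A)[-2:].flip(0)
print(torch.allclose(E, E_ref, rtol=1e-6))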
venv/lib/python3.10/site-packages/torch/_lowrank.py ADDED
@@ -0,0 +1,298 @@
1
+ """Implement various linear algebra algorithms for low rank matrices.
2
+ """
3
+
4
+ __all__ = ["svd_lowrank", "pca_lowrank"]
5
+
6
+ from typing import Optional, Tuple
7
+
8
+ import torch
9
+ from torch import Tensor
10
+ from . import _linalg_utils as _utils
11
+ from .overrides import handle_torch_function, has_torch_function
12
+
13
+
14
+ def get_approximate_basis(
15
+ A: Tensor, q: int, niter: Optional[int] = 2, M: Optional[Tensor] = None
16
+ ) -> Tensor:
17
+ """Return tensor :math:`Q` with :math:`q` orthonormal columns such
18
+ that :math:`Q Q^H A` approximates :math:`A`. If :math:`M` is
19
+ specified, then :math:`Q` is such that :math:`Q Q^H (A - M)`
20
+ approximates :math:`A - M`.
21
+
22
+ .. note:: The implementation is based on the Algorithm 4.4 from
23
+ Halko et al, 2009.
24
+
25
+ .. note:: For an adequate approximation of a k-rank matrix
26
+ :math:`A`, where k is not known in advance but could be
27
+ estimated, the number of :math:`Q` columns, q, can be
28
+ choosen according to the following criteria: in general,
29
+ :math:`k <= q <= min(2*k, m, n)`. For large low-rank
30
+ matrices, take :math:`q = k + 5..10`. If k is
31
+ relatively small compared to :math:`min(m, n)`, choosing
32
+ :math:`q = k + 0..2` may be sufficient.
33
+
34
+ .. note:: To obtain repeatable results, reset the seed for the
35
+ pseudorandom number generator
36
+
37
+ Args::
38
+ A (Tensor): the input tensor of size :math:`(*, m, n)`
39
+
40
+ q (int): the dimension of subspace spanned by :math:`Q`
41
+ columns.
42
+
43
+ niter (int, optional): the number of subspace iterations to
44
+ conduct; ``niter`` must be a
45
+ nonnegative integer. In most cases, the
46
+ default value 2 is more than enough.
47
+
48
+ M (Tensor, optional): the input tensor's mean of size
49
+ :math:`(*, 1, n)`.
50
+
51
+ References::
52
+ - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding
53
+ structure with randomness: probabilistic algorithms for
54
+ constructing approximate matrix decompositions,
55
+ arXiv:0909.4061 [math.NA; math.PR], 2009 (available at
56
+ `arXiv <http://arxiv.org/abs/0909.4061>`_).
57
+ """
58
+
59
+ niter = 2 if niter is None else niter
60
+ m, n = A.shape[-2:]
61
+ dtype = _utils.get_floating_dtype(A)
62
+ matmul = _utils.matmul
63
+
64
+ R = torch.randn(n, q, dtype=dtype, device=A.device)
65
+
66
+ # The following code could be made faster using torch.geqrf + torch.ormqr
67
+ # but geqrf is not differentiable
68
+ A_H = _utils.transjugate(A)
69
+ if M is None:
70
+ Q = torch.linalg.qr(matmul(A, R)).Q
71
+ for i in range(niter):
72
+ Q = torch.linalg.qr(matmul(A_H, Q)).Q
73
+ Q = torch.linalg.qr(matmul(A, Q)).Q
74
+ else:
75
+ M_H = _utils.transjugate(M)
76
+ Q = torch.linalg.qr(matmul(A, R) - matmul(M, R)).Q
77
+ for i in range(niter):
78
+ Q = torch.linalg.qr(matmul(A_H, Q) - matmul(M_H, Q)).Q
79
+ Q = torch.linalg.qr(matmul(A, Q) - matmul(M, Q)).Q
80
+
81
+ return Q
82
+
83
+
84
+ def svd_lowrank(
85
+ A: Tensor,
86
+ q: Optional[int] = 6,
87
+ niter: Optional[int] = 2,
88
+ M: Optional[Tensor] = None,
89
+ ) -> Tuple[Tensor, Tensor, Tensor]:
90
+ r"""Return the singular value decomposition ``(U, S, V)`` of a matrix,
91
+ batches of matrices, or a sparse matrix :math:`A` such that
92
+ :math:`A \approx U diag(S) V^T`. In case :math:`M` is given, then
93
+ SVD is computed for the matrix :math:`A - M`.
94
+
95
+ .. note:: The implementation is based on the Algorithm 5.1 from
96
+ Halko et al, 2009.
97
+
98
+ .. note:: To obtain repeatable results, reset the seed for the
99
+ pseudorandom number generator
100
+
101
+ .. note:: The input is assumed to be a low-rank matrix.
102
+
103
+ .. note:: In general, use the full-rank SVD implementation
104
+ :func:`torch.linalg.svd` for dense matrices due to its 10-fold
105
+ higher performance characteristics. The low-rank SVD
106
+ will be useful for huge sparse matrices that
107
+ :func:`torch.linalg.svd` cannot handle.
108
+
109
+ Args::
110
+ A (Tensor): the input tensor of size :math:`(*, m, n)`
111
+
112
+ q (int, optional): a slightly overestimated rank of A.
113
+
114
+ niter (int, optional): the number of subspace iterations to
115
+ conduct; niter must be a nonnegative
116
+ integer, and defaults to 2
117
+
118
+ M (Tensor, optional): the input tensor's mean of size
119
+ :math:`(*, 1, n)`.
120
+
121
+ References::
122
+ - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding
123
+ structure with randomness: probabilistic algorithms for
124
+ constructing approximate matrix decompositions,
125
+ arXiv:0909.4061 [math.NA; math.PR], 2009 (available at
126
+ `arXiv <https://arxiv.org/abs/0909.4061>`_).
127
+
128
+ """
129
+ if not torch.jit.is_scripting():
130
+ tensor_ops = (A, M)
131
+ if not set(map(type, tensor_ops)).issubset(
132
+ (torch.Tensor, type(None))
133
+ ) and has_torch_function(tensor_ops):
134
+ return handle_torch_function(
135
+ svd_lowrank, tensor_ops, A, q=q, niter=niter, M=M
136
+ )
137
+ return _svd_lowrank(A, q=q, niter=niter, M=M)
138
+
139
+
140
+ def _svd_lowrank(
141
+ A: Tensor,
142
+ q: Optional[int] = 6,
143
+ niter: Optional[int] = 2,
144
+ M: Optional[Tensor] = None,
145
+ ) -> Tuple[Tensor, Tensor, Tensor]:
146
+ q = 6 if q is None else q
147
+ m, n = A.shape[-2:]
148
+ matmul = _utils.matmul
149
+ if M is None:
150
+ M_t = None
151
+ else:
152
+ M_t = _utils.transpose(M)
153
+ A_t = _utils.transpose(A)
154
+
155
+ # Algorithm 5.1 in Halko et al 2009, slightly modified to reduce
156
+ # the number conjugate and transpose operations
157
+ if m < n or n > q:
158
+ # computing the SVD approximation of a transpose in
159
+ # order to keep B shape minimal (the m < n case) or the V
160
+ # shape small (the n > q case)
161
+ Q = get_approximate_basis(A_t, q, niter=niter, M=M_t)
162
+ Q_c = _utils.conjugate(Q)
163
+ if M is None:
164
+ B_t = matmul(A, Q_c)
165
+ else:
166
+ B_t = matmul(A, Q_c) - matmul(M, Q_c)
167
+ assert B_t.shape[-2] == m, (B_t.shape, m)
168
+ assert B_t.shape[-1] == q, (B_t.shape, q)
169
+ assert B_t.shape[-1] <= B_t.shape[-2], B_t.shape
170
+ U, S, Vh = torch.linalg.svd(B_t, full_matrices=False)
171
+ V = Vh.mH
172
+ V = Q.matmul(V)
173
+ else:
174
+ Q = get_approximate_basis(A, q, niter=niter, M=M)
175
+ Q_c = _utils.conjugate(Q)
176
+ if M is None:
177
+ B = matmul(A_t, Q_c)
178
+ else:
179
+ B = matmul(A_t, Q_c) - matmul(M_t, Q_c)
180
+ B_t = _utils.transpose(B)
181
+ assert B_t.shape[-2] == q, (B_t.shape, q)
182
+ assert B_t.shape[-1] == n, (B_t.shape, n)
183
+ assert B_t.shape[-1] <= B_t.shape[-2], B_t.shape
184
+ U, S, Vh = torch.linalg.svd(B_t, full_matrices=False)
185
+ V = Vh.mH
186
+ U = Q.matmul(U)
187
+
188
+ return U, S, V
189
+
190
+
191
+ def pca_lowrank(
192
+ A: Tensor, q: Optional[int] = None, center: bool = True, niter: int = 2
193
+ ) -> Tuple[Tensor, Tensor, Tensor]:
194
+ r"""Performs linear Principal Component Analysis (PCA) on a low-rank
195
+ matrix, batches of such matrices, or a sparse matrix.
196
+
197
+ This function returns a namedtuple ``(U, S, V)`` which is the
198
+ nearly optimal approximation of a singular value decomposition of
199
+ a centered matrix :math:`A` such that :math:`A = U diag(S) V^T`.
200
+
201
+ .. note:: The relation of ``(U, S, V)`` to PCA is as follows:
202
+
203
+ - :math:`A` is a data matrix with ``m`` samples and
204
+ ``n`` features
205
+
206
+ - the :math:`V` columns represent the principal directions
207
+
208
+ - :math:`S ** 2 / (m - 1)` contains the eigenvalues of
209
+ :math:`A^T A / (m - 1)` which is the covariance of
210
+ ``A`` when ``center=True`` is provided.
211
+
212
+ - ``matmul(A, V[:, :k])`` projects data to the first k
213
+ principal components
214
+
215
+ .. note:: Different from the standard SVD, the sizes of the returned
216
+ matrices depend on the specified rank and q
217
+ values as follows:
218
+
219
+ - :math:`U` is m x q matrix
220
+
221
+ - :math:`S` is q-vector
222
+
223
+ - :math:`V` is n x q matrix
224
+
225
+ .. note:: To obtain repeatable results, reset the seed for the
226
+ pseudorandom number generator
227
+
228
+ Args:
229
+
230
+ A (Tensor): the input tensor of size :math:`(*, m, n)`
231
+
232
+ q (int, optional): a slightly overestimated rank of
233
+ :math:`A`. By default, ``q = min(6, m,
234
+ n)``.
235
+
236
+ center (bool, optional): if True, center the input tensor,
237
+ otherwise, assume that the input is
238
+ centered.
239
+
240
+ niter (int, optional): the number of subspace iterations to
241
+ conduct; niter must be a nonnegative
242
+ integer, and defaults to 2.
243
+
244
+ References::
245
+
246
+ - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding
247
+ structure with randomness: probabilistic algorithms for
248
+ constructing approximate matrix decompositions,
249
+ arXiv:0909.4061 [math.NA; math.PR], 2009 (available at
250
+ `arXiv <https://arxiv.org/abs/0909.4061>`_).
251
+
252
+ """
253
+
254
+ if not torch.jit.is_scripting():
255
+ if type(A) is not torch.Tensor and has_torch_function((A,)):
256
+ return handle_torch_function(
257
+ pca_lowrank, (A,), A, q=q, center=center, niter=niter
258
+ )
259
+
260
+ (m, n) = A.shape[-2:]
261
+
262
+ if q is None:
263
+ q = min(6, m, n)
264
+ elif not (q >= 0 and q <= min(m, n)):
265
+ raise ValueError(
266
+ f"q(={q}) must be non-negative integer and not greater than min(m, n)={min(m, n)}"
267
+ )
268
+ if not (niter >= 0):
269
+ raise ValueError(f"niter(={niter}) must be non-negative integer")
270
+
271
+ dtype = _utils.get_floating_dtype(A)
272
+
273
+ if not center:
274
+ return _svd_lowrank(A, q, niter=niter, M=None)
275
+
276
+ if _utils.is_sparse(A):
277
+ if len(A.shape) != 2:
278
+ raise ValueError("pca_lowrank input is expected to be 2-dimensional tensor")
279
+ c = torch.sparse.sum(A, dim=(-2,)) / m
280
+ # reshape c
281
+ column_indices = c.indices()[0]
282
+ indices = torch.zeros(
283
+ 2,
284
+ len(column_indices),
285
+ dtype=column_indices.dtype,
286
+ device=column_indices.device,
287
+ )
288
+ indices[0] = column_indices
289
+ C_t = torch.sparse_coo_tensor(
290
+ indices, c.values(), (n, 1), dtype=dtype, device=A.device
291
+ )
292
+
293
+ ones_m1_t = torch.ones(A.shape[:-2] + (1, m), dtype=dtype, device=A.device)
294
+ M = _utils.transpose(torch.sparse.mm(C_t, ones_m1_t))
295
+ return _svd_lowrank(A, q, niter=niter, M=M)
296
+ else:
297
+ C = A.mean(dim=(-2,), keepdim=True)
298
+ return _svd_lowrank(A - C, q, niter=niter, M=None)
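A minimal doctest-style sketch of calling the ``pca_lowrank`` API defined above; the data shape, the rank ``q=5``, and the printed sizes are illustrative assumptions, not taken from the file itself:

>>> # xdoctest: +SKIP
>>> A = torch.randn(100, 20)                       # hypothetical data: 100 samples, 20 features
>>> U, S, V = torch.pca_lowrank(A, q=5, center=True, niter=2)
>>> U.shape, S.shape, V.shape                      # (m x q), (q,), (n x q), as documented above
(torch.Size([100, 5]), torch.Size([5]), torch.Size([20, 5]))
>>> scores = A @ V[:, :3]                          # project onto the first 3 principal directions

Because the algorithm is randomized, results are repeatable only after reseeding the pseudorandom number generator (e.g. ``torch.manual_seed(0)``).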
venv/lib/python3.10/site-packages/torch/_meta_registrations.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/torch/_namedtensor_internals.py ADDED
@@ -0,0 +1,157 @@
1
+ from collections import OrderedDict
2
+
3
+ """
4
+ This file contains helper functions that implement experimental functionality
5
+ for named tensors in python. All of these are experimental, unstable, and
6
+ subject to change or deletion.
7
+ """
8
+
9
+
10
+ def check_serializing_named_tensor(tensor):
11
+ if tensor.has_names():
12
+ raise RuntimeError(
13
+ "NYI: Named tensors don't support serialization. Please drop "
14
+ "names via `tensor = tensor.rename(None)` before serialization."
15
+ )
16
+
17
+
18
+ def build_dim_map(tensor):
19
+ """Returns a map of { dim: dim_name } where dim is a name if the dim is named
20
+ and the dim index otherwise."""
21
+ return OrderedDict(
22
+ [(idx if name is None else name, name) for idx, name in enumerate(tensor.names)]
23
+ )
24
+
25
+
26
+ def unzip_namedshape(namedshape):
27
+ if isinstance(namedshape, OrderedDict):
28
+ namedshape = namedshape.items()
29
+ if not hasattr(namedshape, "__iter__") and not isinstance(namedshape, tuple):
30
+ raise RuntimeError(
31
+ f"Expected namedshape to be OrderedDict or iterable of tuples, got: {type(namedshape)}"
32
+ )
33
+ if len(namedshape) == 0:
34
+ raise RuntimeError("Expected namedshape to non-empty.")
35
+ return zip(*namedshape)
36
+
37
+
38
+ def namer_api_name(inplace):
39
+ if inplace:
40
+ return "rename_"
41
+ else:
42
+ return "rename"
43
+
44
+
45
+ def is_ellipsis(item):
46
+ return item == Ellipsis or item == "..."
47
+
48
+
49
+ def single_ellipsis_index(names, fn_name):
50
+ ellipsis_indices = [i for i, name in enumerate(names) if is_ellipsis(name)]
51
+ if len(ellipsis_indices) >= 2:
52
+ raise RuntimeError(
53
+ f"{fn_name}: More than one Ellipsis ('...') found in names ("
54
+ f"{names}). This function supports up to one Ellipsis."
55
+ )
56
+ if len(ellipsis_indices) == 1:
57
+ return ellipsis_indices[0]
58
+ return None
59
+
60
+
61
+ def expand_single_ellipsis(numel_pre_glob, numel_post_glob, names):
62
+ return names[numel_pre_glob : len(names) - numel_post_glob]
63
+
64
+
65
+ def replace_ellipsis_by_position(ellipsis_idx, names, tensor_names):
66
+ globbed_names = expand_single_ellipsis(
67
+ ellipsis_idx, len(names) - ellipsis_idx - 1, tensor_names
68
+ )
69
+ return names[:ellipsis_idx] + globbed_names + names[ellipsis_idx + 1 :]
70
+
71
+
72
+ def resolve_ellipsis(names, tensor_names, fn_name):
73
+ """
74
+ Expands ... inside `names` to be equal to a list of names from `tensor_names`.
75
+ """
76
+ ellipsis_idx = single_ellipsis_index(names, fn_name)
77
+ if ellipsis_idx is None:
78
+ return names
79
+ return replace_ellipsis_by_position(ellipsis_idx, names, tensor_names)
80
+
81
+
82
+ def update_names_with_list(tensor, names, inplace):
83
+ # Special case for tensor.rename(None)
84
+ if len(names) == 1 and names[0] is None:
85
+ return tensor._update_names(None, inplace)
86
+
87
+ return tensor._update_names(
88
+ resolve_ellipsis(names, tensor.names, namer_api_name(inplace)), inplace
89
+ )
90
+
91
+
92
+ def update_names_with_mapping(tensor, rename_map, inplace):
93
+ dim_map = build_dim_map(tensor)
94
+ for old_dim in rename_map.keys():
95
+ new_dim = rename_map[old_dim]
96
+ if old_dim in dim_map.keys():
97
+ dim_map[old_dim] = new_dim
98
+ else:
99
+ raise RuntimeError(
100
+ f"{namer_api_name(inplace)}: Tried to rename dim '{old_dim}' to dim "
101
+ f"{new_dim} in Tensor[{tensor.names}] but dim '{old_dim}' does not exist"
102
+ )
103
+ return tensor._update_names(tuple(dim_map.values()), inplace)
104
+
105
+
106
+ def update_names(tensor, names, rename_map, inplace):
107
+ """There are two usages:
108
+
109
+ tensor.rename(*names) returns a view on tensor with named dims `names`.
110
+ `names` must be of length `tensor.dim()`; otherwise, if '...' is in `names`,
111
+ then it is expanded greedily to be equal to the corresponding names from
112
+ `tensor.names`.
113
+
114
+ For example,
115
+ ```
116
+ >>> # xdoctest: +SKIP
117
+ >>> x = torch.empty(2, 3, 5, 7, names=('N', 'C', 'H', 'W'))
118
+ >>> x.rename('...', 'height', 'width').names
119
+ ('N', 'C', 'height', 'width')
120
+
121
+ >>> # xdoctest: +SKIP
122
+ >>> x.rename('batch', '...', 'width').names
123
+ ('batch', 'C', 'H', 'width')
124
+
125
+ ```
126
+
127
+ tensor.rename(**rename_map) returns a view on tensor that has renamed dims
128
+ as specified in the mapping `rename_map`.
129
+
130
+ For example,
131
+ ```
132
+ >>> # xdoctest: +SKIP
133
+ >>> x = torch.empty(2, 3, 5, 7, names=('N', 'C', 'H', 'W'))
134
+ >>> x.rename(W='width', H='height').names
135
+ ('N', 'C', 'height', 'width')
136
+
137
+ ```
138
+
139
+ Finally, tensor.rename has an in-place version called tensor.rename_.
140
+ """
141
+ has_names = len(names) > 0
142
+ has_rename_pairs = bool(rename_map)
143
+ if has_names and has_rename_pairs:
144
+ raise RuntimeError(
145
+ f"{namer_api_name(inplace)}: This function takes either positional "
146
+ f"args or keyword args, but not both. Use tensor.{namer_api_name(inplace)}(*names) "
147
+ f"to name dims and tensor.{namer_api_name(inplace)}(**rename_map) to rename "
148
+ "dims."
149
+ )
150
+
151
+ # Special case for tensor.rename(*[]), which is valid for a 0 dim tensor.
152
+ if not has_names and not has_rename_pairs:
153
+ return update_names_with_list(tensor, names, inplace)
154
+
155
+ if has_names:
156
+ return update_names_with_list(tensor, names, inplace)
157
+ return update_names_with_mapping(tensor, rename_map, inplace)
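A short doctest-style sketch of the mapping path implemented by ``update_names_with_mapping`` above; the tensor and dim names are illustrative, and the error text follows the format string in the code:

>>> # xdoctest: +SKIP
>>> x = torch.empty(2, 3, names=('N', 'C'))
>>> x.rename(C='channels').names                   # keyword form routes through update_names_with_mapping
('N', 'channels')
>>> x.rename(H='height')                           # 'H' is not a dim of x, so a RuntimeError is raised
RuntimeError: rename: Tried to rename dim 'H' to dim height in Tensor[('N', 'C')] but dim 'H' does not exist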
venv/lib/python3.10/site-packages/torch/_ops.py ADDED
@@ -0,0 +1,1037 @@
1
+ import contextlib
2
+ import ctypes
3
+ import importlib
4
+ import inspect
5
+ import sys
6
+ import types
7
+ from typing import Any, Callable, Dict, Set, Type, Union
8
+
9
+ import torch._C
10
+ import torch.utils._pytree as pytree
11
+ from torch import _utils_internal
12
+ from torch._functorch.pyfunctorch import dispatch_functorch
13
+ from torch.utils._python_dispatch import TorchDispatchMode
14
+
15
+ # Query `hasattr` only once.
16
+
17
+ _SET_GLOBAL_FLAGS = hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags")
18
+
19
+
20
+ @contextlib.contextmanager
21
+ def dl_open_guard():
22
+ """
23
+ Context manager to set the RTLD_GLOBAL dynamic linker flag while we open a
24
+ shared library to load custom operators.
25
+ """
26
+ if not _SET_GLOBAL_FLAGS:
27
+ yield
28
+ return
29
+ old_flags = sys.getdlopenflags()
30
+ sys.setdlopenflags(old_flags | ctypes.RTLD_GLOBAL)
31
+ try:
32
+ yield
33
+ finally:
34
+ sys.setdlopenflags(old_flags)
35
+
36
+
37
+ class OperatorBase:
38
+ """
39
+ Base class for OpOverload (which represents C++ ATen operators) and HigherOrderOperator
40
+ (which represents Python-only operators that are unrepresentable in TorchScript).
41
+ """
42
+
43
+ def __init__(self):
44
+ # The dispatch cache precomputes a mapping of dispatch key that the
45
+ # dispatcher wants to dispatch to, to an actual implementation of the
46
+ # dispatch key. Confusingly, the actual implementation could *also* be a
47
+ # dispatch key, but in this case, this refers to the C++ kernel that
48
+ # was registered to some dispatch key. Aliases are permitted in the
49
+ # latter but not the former; for example, you might look up the
50
+ # entry for AutogradCPU, and this maps you to the Autograd key for
51
+ # the generic autograd kernel that works for all devices. Since this
52
+ # is the Python dispatcher, you can also put an arbitrary Python
53
+ # callable to call instead. This handler gets precisely the
54
+ # args/kwargs that the operator was __call__'ed with.
55
+ # NB: This name is hard-coded in torch/csrc/autograd/python_variable.cpp
56
+ # for use with OpOverload; cache lookup is done entirely from C++
57
+ # for speed.
58
+ # TODO: The cache is NOT currently used by HigherOrderOperator, but it should!
59
+ self._dispatch_cache: Dict[
60
+ torch._C.DispatchKey, Union[torch._C.DispatchKey, Callable[..., Any]]
61
+ ] = {}
62
+
63
+ # This table allows you to override the behavior of a particular
64
+ # dispatch key to call a custom Python function, rather than the
65
+ # ordinary C++ configured behavior. This is the raison d'etre of
66
+ # Python dispatcher: to let you program the dispatcher from Python
67
+ # in case you need something unusual, and don't want to clobber
68
+ # the existing registrations using the Python operator registration
69
+ # API.
70
+ self.py_kernels: Dict[torch._C.DispatchKey, Callable[..., Any]] = {}
71
+
72
+ # This table allows you to override the behavior of a particular
73
+ # operator for a particular TorchDispatchMode. In practice,
74
+ # we are using this mostly for ProxyTensorMode. Modes can be
75
+ # thought of as an open world extension of dispatch keys, so it
76
+ # makes sense that you should be able to register them, the same
77
+ # way you can register dispatch keys.
78
+ self.python_key_mode_table: Dict[
79
+ Type[TorchDispatchMode], Callable[..., Any]
80
+ ] = {}
81
+
82
+ # This table allows you to override the behavior of functorch
83
+ # transformations. NB: this currently only does something for
84
+ # HigherOrderOperator
85
+ self.functorch_table = {}
86
+
87
+ def __call__(self, *args, **kwargs):
88
+ raise NotImplementedError()
89
+
90
+ def has_kernel_for_dispatch_key(self, k):
91
+ return k in self.py_kernels
92
+
93
+ def has_kernel_for_any_dispatch_key(self, ks):
94
+ for k in self.py_kernels:
95
+ if not torch._C._dispatch_is_alias_key(k) and ks.has(k):
96
+ return True
97
+ return False
98
+
99
+ def py_impl(self, k):
100
+ def inner(fn):
101
+ if inspect.isclass(k) and issubclass(k, TorchDispatchMode):
102
+ assert k not in self.python_key_mode_table
103
+ # TODO(voz): Should we replace setting torch._C.DispatchKey.Python entirely with setting mode keys?
104
+ self.python_key_mode_table[k] = fn
105
+ self._dispatch_cache.clear()
106
+ return fn
107
+
108
+ if isinstance(k, torch._C._functorch.TransformType):
109
+ assert k not in self.functorch_table
110
+ self.functorch_table[k] = fn
111
+ return fn
112
+
113
+ assert isinstance(k, torch._C.DispatchKey)
114
+ assert (
115
+ k != torch._C.DispatchKey.Python
116
+ ), "Please register a mode for the torch._C.DispatchKey.Python key instead."
117
+
118
+ if k in self.py_kernels:
119
+ raise RuntimeError(
120
+ f"Trying to override a python impl for {k} on operator {self.name()}"
121
+ )
122
+ self.py_kernels[k] = fn
123
+ self._dispatch_cache.clear()
124
+ return fn
125
+
126
+ return inner
127
+
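+ # Illustrative sketch (not part of the upstream source): py_impl is used as a
+ # decorator to register a Python kernel for a dispatch key, a functorch
+ # transform type, or a TorchDispatchMode subclass on an operator instance `op`:
+ #
+ #     @op.py_impl(torch._C.DispatchKey.CPU)
+ #     def cpu_kernel(*args, **kwargs):
+ #         ...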
128
+ # Registers an implementation to all **3** variants of functionalization that we have:
129
+ # - DispatchKey.Functionalize
130
+ # - functorch.TransformType.Functionalize
131
+ # - FunctionalTensorMode
132
+ # Example:
133
+ # @py_functionalize_impl
134
+ # def functionalize_rule(ctx, inner_f, *args):
135
+ # args_unwrapped = ctx.unwrap_tensors(args)
136
+ # with ctx.redispatch_to_next():
137
+ # out = ctx.functionalize(inner_f)(*args_unwrapped)
138
+ # return ctx.wrap_tensors(out)
139
+ def py_functionalize_impl(self, fn):
140
+ from torch._subclasses.functional_tensor import (
141
+ CppFunctionalizeAPI as _CppFunctionalizeAPI,
142
+ FunctorchFunctionalizeAPI as _FunctorchFunctionalizeAPI,
143
+ PythonFunctionalizeAPI as _PythonFunctionalizeAPI,
144
+ )
145
+
146
+ # Construct our three flavors of functionalization,
147
+ # each of which have slightly different wrap/unwrap/redispatch policies
148
+ def functionalize_dk_fn(*args, **kwargs):
149
+ return fn(_CppFunctionalizeAPI(), *args, **kwargs)
150
+
151
+ def functionalize_dispatch_mode_fn(mode, *args, **kwargs):
152
+ return fn(_PythonFunctionalizeAPI(mode), *args, **kwargs)
153
+
154
+ def functionalize_functorch_fn(interpreter, *args, **kwargs):
155
+ return fn(_FunctorchFunctionalizeAPI(interpreter), *args, **kwargs)
156
+
157
+ self.py_impl(torch._C.DispatchKey.Functionalize)(functionalize_dk_fn)
158
+ self.py_impl(torch._subclasses.functional_tensor.FunctionalTensorMode)(
159
+ functionalize_dispatch_mode_fn
160
+ )
161
+ self.py_impl(torch._C._functorch.TransformType.Functionalize)(
162
+ functionalize_functorch_fn
163
+ )
164
+
165
+ return fn
166
+
167
+ def name(self):
168
+ raise NotImplementedError()
169
+
170
+
171
+ is_included_in_alias = torch._C._dispatch_is_included_in_alias
172
+
173
+ DispatchKey = torch._C.DispatchKey
174
+
175
+
176
+ # Equivalent to computeDispatchTableEntryWithDebug
177
+ def resolve_key(op: OperatorBase, k: DispatchKey): # type: ignore[valid-type]
178
+ # 1. (Direct) operator registration
179
+ if op.has_kernel_for_dispatch_key(k):
180
+ return k
181
+ # 2.1 Use CompositeExplicitAutogradNonFunctional kernel if available
182
+ cand = DispatchKey.CompositeExplicitAutogradNonFunctional
183
+ if (
184
+ k == DispatchKey.Undefined or is_included_in_alias(k, cand)
185
+ ) and op.has_kernel_for_dispatch_key(cand):
186
+ return cand
187
+ # 2.2 Use CompositeExplicitAutograd kernel if available
188
+ cand = DispatchKey.CompositeExplicitAutograd
189
+ if (
190
+ k == DispatchKey.Undefined or is_included_in_alias(k, cand)
191
+ ) and op.has_kernel_for_dispatch_key(cand):
192
+ return cand
193
+ has_backend_kernel = op.has_kernel_for_any_dispatch_key(
194
+ torch._C._dispatch_get_backend_keyset_from_autograd(k)
195
+ ) or op.has_kernel_for_dispatch_key(DispatchKey.CompositeExplicitAutograd)
196
+ # 2.3. Use CompositeImplicitAutograd kernel if available
197
+ cand = DispatchKey.CompositeImplicitAutogradNestedTensor
198
+ if (
199
+ (k != DispatchKey.Undefined and is_included_in_alias(k, cand))
200
+ and op.has_kernel_for_dispatch_key(cand)
201
+ and not has_backend_kernel
202
+ ):
203
+ return cand
204
+ cand = DispatchKey.CompositeImplicitAutograd
205
+ if (
206
+ k == DispatchKey.Undefined or is_included_in_alias(k, cand)
207
+ ) and op.has_kernel_for_dispatch_key(cand):
208
+ if k == DispatchKey.AutogradOther and op.has_kernel_for_any_dispatch_key(
209
+ torch._C._dispatch_autogradother_backends
210
+ ):
211
+ raise RuntimeError("ambiguous autogradother kernel")
212
+ elif not has_backend_kernel:
213
+ return cand
214
+ # 2.4. For autograd backend keys, use kernel from DispatchKey::Autograd if available
215
+ cand = DispatchKey.Autograd
216
+ if is_included_in_alias(k, cand) and op.has_kernel_for_dispatch_key(cand):
217
+ return cand
218
+ # 2.5 Use kernel from DispatchKey::FuncTorchBatchedDecomposition if available
219
+ cand = DispatchKey.FuncTorchBatchedDecomposition
220
+ if is_included_in_alias(k, cand) and op.has_kernel_for_dispatch_key(cand):
221
+ return cand
222
+ # Backend fallback
223
+ if torch._C._dispatch_has_backend_fallback(k):
224
+ # The dispatch key itself will implicitly route to backend fallback.
225
+ # This is probably not great for the pure Python implementation.
226
+ return k
227
+ raise NotImplementedError(f"could not find kernel for {op} at dispatch key {k}")
228
+
229
+
230
+ _higher_order_ops: Dict[str, "HigherOrderOperator"] = {}
231
+
232
+ _HIGHER_ORDER_OP_DEFAULT_FALLTHROUGH_DISPATCH_KEYS = [
233
+ DispatchKey.PythonDispatcher, # type: ignore[attr-defined]
234
+ DispatchKey.PythonTLSSnapshot, # type: ignore[attr-defined]
235
+ DispatchKey.ADInplaceOrView,
236
+ DispatchKey.BackendSelect,
237
+ DispatchKey.AutocastCPU, # type: ignore[attr-defined]
238
+ DispatchKey.AutocastCUDA, # type: ignore[attr-defined]
239
+ ]
240
+
241
+
242
+ class HigherOrderOperator(OperatorBase):
243
+ # The HigherOrderOperator will appear as torch.ops.higher_order.{name}
244
+ #
245
+ # If you're creating a new HigherOrderOperator, please do not change the
246
+ # default. Adding operators to the global torch.ops namespace is a bad
247
+ # practice due to name collisions.
248
+ def __init__(self, name):
249
+ super().__init__()
250
+ self._name = name
251
+
252
+ # Make _OpNamespace not scream; this whole name-based association needs a good hard look
253
+ self.__name__ = name
254
+ _higher_order_ops[name] = self
255
+ self._ns = "higher_order"
256
+
257
+ # For a normal HigherOrderOperator instance, we will change its __module__ from torch._ops to
258
+ # torch._ops.higher_order.
259
+ # For an instance of subclass of HigherOrderOperator (e.g. customized higher order op),
260
+ # the __module__ attribute will be kept unchanged.
261
+ if self.__class__ is HigherOrderOperator:
262
+ self_name_space = "." + self.namespace if self.namespace else ""
263
+ self.__module__ = self.__module__ + self_name_space
264
+ self.non_fallthrough_keys = torch._C._dispatch_keyset_full()
265
+
266
+ for dispatch_key in _HIGHER_ORDER_OP_DEFAULT_FALLTHROUGH_DISPATCH_KEYS:
267
+ self.fallthrough(dispatch_key)
268
+
269
+ # [NOTE] We have to register pre-dispatch key implementation
270
+ # because sometimes HOP use aot-dispatch tracing to detect certaion
271
+ # mutations. This is problematic when we are functionalizing HOP
272
+ # during pre-dispatch because when the inner tracer starts, it will see
273
+ # that PreDispatch key is still active. In that case, we just redispatch
274
+ # it to next key. This is only safe to do when PreDispatch key stack has no
275
+ # active modes.
276
+ # TODO (tmanlaibaatar) Make it generic fallback mechanism
277
+ def _(*args, **kwargs):
278
+ if _len_torch_dispatch_stack_pre_dispatch() == 0:
279
+ with torch._C._ExcludeDispatchKeyGuard(
280
+ torch._C.DispatchKeySet(DispatchKey.PreDispatch)
281
+ ):
282
+ return self(*args, **kwargs)
283
+ raise AssertionError(
284
+ """
285
+ Can't directly invoke HOP implementation at PreDispatch key
286
+ if there are active modes on PreDispatch mode stack.
287
+ """
288
+ )
289
+
290
+ self.py_impl(torch._C.DispatchKey.PreDispatch)(_)
291
+
292
+ def py_impl(self, k):
293
+ if isinstance(k, torch._C.DispatchKey) and not self.non_fallthrough_keys.has(k):
294
+ self.non_fallthrough_keys = self.non_fallthrough_keys.add(k)
295
+ return super().py_impl(k)
296
+
297
+ @property
298
+ def namespace(self):
299
+ return self._ns
300
+
301
+ def fallthrough(self, dispatch_key):
302
+ self.non_fallthrough_keys = self.non_fallthrough_keys.remove(dispatch_key)
303
+
304
+ def dispatch(self, dispatch_key, *args, **kwargs):
305
+ from torch.utils._python_dispatch import _get_current_dispatch_mode
306
+
307
+ if dispatch_key in self._dispatch_cache:
308
+ kernel = self._dispatch_cache[dispatch_key]
309
+ assert not isinstance(kernel, torch._C.DispatchKey)
310
+ return kernel(*args, **kwargs)
311
+
312
+ if dispatch_key == torch._C.DispatchKey.FuncTorchDynamicLayerFrontMode:
313
+ return dispatch_functorch(self, args, kwargs)
314
+
315
+ if dispatch_key == torch._C.DispatchKey.Python:
316
+ # The place to handle ProxyTorchDispatchMode, FakeTensorMode, etc
317
+ from torch.utils._python_dispatch import _pop_mode_temporarily
318
+
319
+ curr_mode = _get_current_dispatch_mode()
320
+ assert (
321
+ curr_mode is not None
322
+ ), "Illegal invocation of dispatch on torch._C.DispatchKey.Python without a mode."
323
+ assert (
324
+ type(curr_mode) in self.python_key_mode_table
325
+ ), f"Current active mode {curr_mode} not registered"
326
+ handler = self.python_key_mode_table[type(curr_mode)]
327
+ with _pop_mode_temporarily() as mode:
328
+ return handler(mode, *args, **kwargs)
329
+
330
+ functionality_key = torch._C._to_functionality_key(dispatch_key) # type: ignore[attr-defined]
331
+ if functionality_key == torch._C.DispatchKey.PreDispatch:
332
+ from torch.utils._python_dispatch import _pop_mode_temporarily
333
+
334
+ # The check for Python in the exclude set is so we properly respect `with no_dispatch()`
335
+ # calls inside of a mode.
336
+ if (
337
+ _len_torch_dispatch_stack_pre_dispatch() > 0
338
+ ) and not torch._C._dispatch_tls_is_dispatch_key_excluded(
339
+ DispatchKey.Python
340
+ ):
341
+ curr_mode = _get_current_dispatch_mode_pre_dispatch()
342
+ assert (
343
+ curr_mode is not None
344
+ ), "Illegal invocation of dispatch on torch._C.DispatchKey.PreDispatch without a mode."
345
+ assert (
346
+ type(curr_mode) in self.python_key_mode_table
347
+ ), f"Current active mode {curr_mode} not registered"
348
+ handler = self.python_key_mode_table[type(curr_mode)]
349
+ with _pop_mode_temporarily(functionality_key) as mode:
350
+ return handler(mode, *args, **kwargs)
351
+
352
+ final_key = resolve_key(self, dispatch_key)
353
+
354
+ # This can current fail due to backend fallbacks. You just have to
355
+ # register them by hand for HigherOrderOperator.
356
+ if final_key not in self.py_kernels:
357
+ raise NotImplementedError(
358
+ f"could not find kernel for HigherOrderOperator {self._name} "
359
+ f"at dispatch key {final_key} (resolved from {dispatch_key})"
360
+ )
361
+ self._dispatch_cache[dispatch_key] = self.py_kernels[final_key]
362
+ kernel = self.py_kernels[final_key]
363
+ # It's illegal to register DispatchKey to py_kernels, since there's no
364
+ # C++ kernel to call into
365
+ assert not isinstance(kernel, torch._C.DispatchKey)
366
+ return kernel(*args, **kwargs)
367
+
368
+ def __call__(self, *args, **kwargs):
369
+ # Dynamo already traces the body of HigherOrderOp beforehand, so there is
370
+ # no need to trace into it here.
371
+ import torch._dynamo
372
+ from torch._dynamo import disable
373
+
374
+ @disable
375
+ def wrapper():
376
+ flat_args = _to_flat_tuple(args, kwargs)
377
+ if torch.overrides.has_torch_function(flat_args):
378
+ return torch.overrides.handle_torch_function(
379
+ self, flat_args, *args, **kwargs
380
+ )
381
+
382
+ dispatch_key_set = _compute_keyset(args, kwargs, self.non_fallthrough_keys)
383
+ return self.dispatch(
384
+ dispatch_key_set.highestPriorityTypeId(), *args, **kwargs
385
+ )
386
+
387
+ return wrapper()
388
+
389
+ def __str__(self):
390
+ return f"{self.name()}"
391
+
392
+ def name(self):
393
+ return self._name
394
+
395
+
396
+ def _to_flat_tuple(args, kwargs):
397
+ return pytree.arg_tree_leaves(*args, **kwargs)
398
+
399
+
400
+ def _compute_keyset(args, kwargs, non_fallthrough_keys):
401
+ tensors = _get_tensors(args, kwargs)
402
+ return key_extractor(tensors, non_fallthrough_keys)
403
+
404
+
405
+ def _get_tensors(args, kwargs):
406
+ flat_all = _to_flat_tuple(args, kwargs)
407
+ tensor_args = [t for t in flat_all if isinstance(t, torch.Tensor)]
408
+ return tuple(tensor_args)
409
+
410
+
411
+ # Note - this should maintain identical impl to the C++ dispatcher key extraction logic
412
+ # at ATen/core/dispatch/DispatchKeyExtractor.h
413
+ def key_extractor(tensors, key_mask):
414
+ key_set = torch._C._dispatch_tls_local_include_set()
415
+ for tensor in tensors:
416
+ key_set = key_set | torch._C._dispatch_keys(tensor)
417
+ key_set = key_set - torch._C._dispatch_tls_local_exclude_set()
418
+ key_set = key_set & key_mask
419
+ return key_set
420
+
421
+
422
+ # Mode stack for PreDispatchKey.
423
+ # It should always have two keys, with
424
+ # priority given to FunctionalTensorMode and
425
+ # then ProxyTorchDispatchMode. It means that
426
+ # slot 0 belongs to ProxyTorchDispatchMode and
427
+ # slot 1 belongs to FunctionalTensorMode.
428
+ class _ModeStackStateForPreDispatch:
429
+ def __init__(self):
430
+ self.__infra_modes = [None, None]
431
+
432
+ def set(self, index, mode):
433
+ assert index < len(self.__infra_modes)
434
+ self.__infra_modes[index] = mode
435
+
436
+ def get(self, index):
437
+ assert index < len(self.__infra_modes)
438
+ return self.__infra_modes[index]
439
+
440
+ def count(self):
441
+ return len([i for i in self.__infra_modes if i is not None])
442
+
443
+
444
+ _mode_stack_state_for_pre_dispatch = _ModeStackStateForPreDispatch()
445
+
446
+
447
+ def unset_mode_pre_dispatch(mode_key):
448
+ current_mode_stack_pre_dispatch = mode_stack_state_for_pre_dispatch()
449
+ assert mode_key in (
450
+ torch._C._TorchDispatchModeKey.PROXY,
451
+ torch._C._TorchDispatchModeKey.FUNCTIONAL,
452
+ )
453
+ if mode_key == torch._C._TorchDispatchModeKey.PROXY:
454
+ current_mode = current_mode_stack_pre_dispatch.get(0)
455
+ mode_stack_state_for_pre_dispatch().set(0, None)
456
+ return current_mode
457
+ else:
458
+ current_mode = current_mode_stack_pre_dispatch.get(1)
459
+ mode_stack_state_for_pre_dispatch().set(1, None)
460
+ return current_mode
461
+
462
+
463
+ def _set_mode_pre_dispatch(mode):
464
+ from torch._subclasses.functional_tensor import FunctionalTensorMode
465
+ from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode
466
+
467
+ assert isinstance(mode, (FunctionalTensorMode, ProxyTorchDispatchMode))
468
+ if isinstance(mode, FunctionalTensorMode):
469
+ current_mode = mode_stack_state_for_pre_dispatch().get(1)
470
+ assert current_mode is None
471
+ mode_stack_state_for_pre_dispatch().set(1, mode)
472
+ return
473
+
474
+ current_mode = mode_stack_state_for_pre_dispatch().get(0)
475
+ assert current_mode is None
476
+ mode_stack_state_for_pre_dispatch().set(0, mode)
477
+
478
+
479
+ def _pop_mode_from_pre_dispatch():
480
+ mode_stack = mode_stack_state_for_pre_dispatch()
481
+ if mode_stack.get(1) is not None:
482
+ res = mode_stack.get(1)
483
+ mode_stack.set(1, None)
484
+ return res
485
+
486
+ if mode_stack.get(0) is not None:
487
+ res = mode_stack.get(0)
488
+ mode_stack.set(0, None)
489
+ return res
490
+
491
+ raise AssertionError("Trying to pop empty mode stack")
492
+
493
+
494
+ def _len_torch_dispatch_stack_pre_dispatch():
495
+ return mode_stack_state_for_pre_dispatch().count()
496
+
497
+
498
+ def _get_dispatch_mode_pre_dispatch(mode_key):
499
+ assert mode_key in (
500
+ torch._C._TorchDispatchModeKey.PROXY,
501
+ torch._C._TorchDispatchModeKey.FUNCTIONAL,
502
+ )
503
+ if mode_key == torch._C._TorchDispatchModeKey.PROXY:
504
+ return mode_stack_state_for_pre_dispatch().get(0)
505
+ return mode_stack_state_for_pre_dispatch().get(1)
506
+
507
+
508
+ def _get_current_dispatch_mode_pre_dispatch():
509
+ stack_len = mode_stack_state_for_pre_dispatch().count()
510
+ if stack_len == 2:
511
+ return mode_stack_state_for_pre_dispatch().get(1)
512
+ if stack_len == 1:
513
+ return (
514
+ mode_stack_state_for_pre_dispatch().get(1)
515
+ if mode_stack_state_for_pre_dispatch().get(1) is not None
516
+ else mode_stack_state_for_pre_dispatch().get(0)
517
+ )
518
+ return None
519
+
520
+
521
+ def mode_stack_state_for_pre_dispatch():
522
+ global _mode_stack_state_for_pre_dispatch
523
+ return _mode_stack_state_for_pre_dispatch
524
+
525
+
526
+ cached_ops: Set["OpOverload"] = set()
527
+
528
+
529
+ def add_cached_op(op_overload):
530
+ global cached_ops
531
+ cached_ops.add(op_overload)
532
+
533
+
534
+ def reset_cached_ops():
535
+ global cached_ops
536
+ cached_ops.clear()
537
+
538
+
539
+ def get_cached_ops():
540
+ global cached_ops
541
+ return cached_ops
542
+
543
+
544
+ # Each OpOverload object contains a pointer to a specific operator overload and a pointer to the parent `OpOverloadPacket` object.
545
+ # You can obtain an OpOverload object through attribute query on OpOverloadPacket.
546
+ class OpOverload(OperatorBase):
547
+ def __init__(self, overloadpacket, op, op_dk, schema, tags):
548
+ super().__init__()
549
+ self._op = op
550
+ self._op_dk = op_dk
551
+ self._schema = schema
552
+ self._overloadpacket = overloadpacket
553
+ self._tags = tags
554
+ self._overloadname = (
555
+ "default" if schema.overload_name == "" else schema.overload_name
556
+ )
557
+ self._name = self._schema.name
558
+ if schema.overload_name:
559
+ self._name += "." + schema.overload_name
560
+ self.__name__ = f"{self._schema.name.split('::')[1]}.{self._overloadname}"
561
+ self.__module__ = overloadpacket.__module__
562
+ op.__module__ = overloadpacket.__module__
563
+ self.__qualname__ = self._name
564
+ self.__annotations__ = {}
565
+
566
+ # If the OpOverload was constructed from a Library.def in Python.
567
+ self._defined_in_python = self.__qualname__ in torch.library._defs
568
+
569
+ # Logic replicated from aten/src/ATen/native/MathBitsFallback.h
570
+ is_write = None
571
+ for a in self._schema.arguments:
572
+ if a.alias_info is None:
573
+ continue
574
+ if is_write is None:
575
+ is_write = a.alias_info.is_write
576
+ else:
577
+ # We will conservatively call mixed mutable/non-mutable
578
+ # aliased inputs as NOT a view
579
+ is_write = a.alias_info.is_write or is_write
580
+ self.is_view = is_write is not None and not is_write
581
+
582
+ # __deepcopy__ is a no-op since the OpOverload object is immutable and must be unique for a given op overload.
583
+ def __deepcopy__(self, memo=None):
584
+ return self
585
+
586
+ def __repr__(self):
587
+ return "<OpOverload(op='{}.{}', overload='{}')>".format(
588
+ *self._schema.name.split("::"), self._overloadname
589
+ )
590
+
591
+ def __call__(self_, *args, **kwargs): # noqa: B902
592
+ # use `self_` to avoid a naming collision with aten ops arguments that
593
+ # are named "self". This way, all the aten ops can be called by kwargs.
594
+ return self_._op(*args, **kwargs)
595
+
596
+ def __hash__(self):
597
+ return hash(self._op)
598
+
599
+ # `my_namespace.my_op_name.overload_name`
600
+ def __str__(self):
601
+ return "{}.{}.{}".format(*self._schema.name.split("::"), self._overloadname)
602
+
603
+ def has_kernel_for_dispatch_key(self, k):
604
+ return super().has_kernel_for_dispatch_key(
605
+ k
606
+ ) or torch._C._dispatch_has_kernel_for_dispatch_key(self.name(), k)
607
+
608
+ def has_kernel_for_any_dispatch_key(self, ks):
609
+ return torch._C._dispatch_has_kernel_for_any_dispatch_key(
610
+ self.name(), ks
611
+ ) or super().has_kernel_for_any_dispatch_key(ks)
612
+
613
+ @property
614
+ def namespace(self):
615
+ return self._schema.name.split("::")[0]
616
+
617
+ def _handle(self):
618
+ return torch._C._dispatch_find_schema_or_throw(
619
+ self._schema.name, self._schema.overload_name
620
+ )
621
+
622
+ def decompose(self, *args, **kwargs):
623
+ dk = torch._C.DispatchKey.CompositeImplicitAutograd
624
+ if dk in self.py_kernels:
625
+ # NB: This branch is not too necessary anymore, because we can
626
+ # apply Python CompositeImplicitAutograd *before* tracing
627
+ # using Python dispatcher (also taking advantage of the autograd
628
+ # formula). But it's included for completeness
629
+ return self.py_kernels[dk](*args, **kwargs)
630
+ elif torch._C._dispatch_has_kernel_for_dispatch_key(self.name(), dk):
631
+ return self._op_dk(dk, *args, **kwargs)
632
+ else:
633
+ return NotImplemented
634
+
635
+ # Remove a dispatch key from the dispatch cache. This will force it to get
636
+ # recomputed the next time. Does nothing if the key is not cached.
637
+ # WARNING: if you register a dispatch key to py_kernels of an OpOverload,
638
+ # calling _del_dispatch on that key is NOT sufficient to apply your change,
639
+ # because a single registration may affect MULTIPLE dispatch keys (e.g.,
640
+ # registering Autograd affects AutogradCPU). del_dispatch is to be used
641
+ # only if you are specifically modifying how get_dispatch handles a
642
+ # particular input 'key'.
643
+ def _uncache_dispatch(self, key):
644
+ self._dispatch_cache.pop(key, None)
645
+
646
+ # This implements the pre-computation logic for the Python dispatcher.
647
+ def _get_dispatch(self, key):
648
+ # This is only called upon a cache miss
649
+ assert key not in self._dispatch_cache, f"{self} {key}"
650
+
651
+ if key == torch._C.DispatchKey.Python:
652
+ if not self.python_key_mode_table:
653
+ self._dispatch_cache[key] = key
654
+ add_cached_op(self)
655
+ return key
656
+
657
+ def handler(*args, **kwargs):
658
+ from torch.utils._python_dispatch import _get_current_dispatch_mode
659
+
660
+ # TODO: We also need to handle tensor subclasses here
661
+ # TODO(voz): We should walk all the nodes here / turn it into a list, topmode is ok for now.
662
+ curr_mode = type(_get_current_dispatch_mode())
663
+ assert (
664
+ curr_mode is not None
665
+ ), "Illegal invocation of dispatch on torch._C.DispatchKey.Python without a mode."
666
+ if curr_mode not in self.python_key_mode_table:
667
+ # TODO: This path is slow, should generally encourage this
668
+ # case to not happen
669
+ return self._op_dk(key, *args, **kwargs)
670
+ # TODO(voz): The idea behind this is that we do not yet support dispatch by key + mode, only key.
671
+ return self.python_key_mode_table[curr_mode](*args, **kwargs)
672
+
673
+ self._dispatch_cache[key] = handler
674
+ add_cached_op(self)
675
+ return handler
676
+
677
+ functionality_key = torch._C._to_functionality_key(key) # type: ignore[attr-defined]
678
+ if functionality_key == torch._C.DispatchKey.PreDispatch:
679
+ curr_stack_len = _len_torch_dispatch_stack_pre_dispatch()
680
+ # The check for Python in the exclude set is so we properly respect `with no_dispatch()`
681
+ # calls inside of a mode.
682
+ if (
683
+ curr_stack_len > 0
684
+ and not torch._C._dispatch_tls_is_dispatch_key_excluded(
685
+ DispatchKey.Python
686
+ )
687
+ ):
688
+
689
+ def handler(*args, **kwargs):
690
+ @contextlib.contextmanager
691
+ def _temporarily_pop_modes_from_pre_dispatch():
692
+ top_mode = _pop_mode_from_pre_dispatch()
693
+ try:
694
+ yield top_mode
695
+ finally:
696
+ _set_mode_pre_dispatch(top_mode)
697
+
698
+ with _temporarily_pop_modes_from_pre_dispatch() as curr_mode:
699
+ assert isinstance(curr_mode, TorchDispatchMode)
700
+ overload_types = []
701
+ args_flattened, _ = torch.utils._pytree.tree_flatten(
702
+ (args, kwargs.values())
703
+ )
704
+ for a in args_flattened:
705
+ # TODO: need to double check the semantics of the "types" argument to torch_dispatch.
706
+ # It's generated in PyInterpreter.cpp, but seems to be generated in two places,
707
+ # where in one case we only include tensors with the python key, and in another
708
+ # we include **all** tensors.
709
+ if isinstance(a, torch.Tensor) and torch._C._dispatch_keys(
710
+ a
711
+ ).has(torch._C.DispatchKey.Python):
712
+ overload_types.append(type(a))
713
+ # TODO: check that I got these args correct (in C++, we pass in "0000"??)
714
+
715
+ return curr_mode.__torch_dispatch__(
716
+ self, overload_types, args, kwargs
717
+ )
718
+
719
+ # Note [Not Caching Per-Dispatch-Key Mode Handlers]
720
+ # Note that we're not caching this handler. There isn't really a point, since the slow bit
721
+ # is the handler itself (in python).
722
+ # Also, not caching means that we don't have to reset the cache when any existing
723
+ # modes go out of scope (which in of itself takes time to loop through all operators).
724
+ return handler
725
+
726
+ final_key = resolve_key(self, key)
727
+
728
+ # See Note [Not Caching Per-Dispatch-Key Mode Handlers]
729
+ cache_result = key != torch._C.DispatchKey.PreDispatch
730
+
731
+ # TODO: We could potentially have lots of debugging wrappers against
732
+ # dispatch keys; design some general registration mechanism instead of
733
+ # having if statement for each of them
734
+ if key == torch._C.DispatchKey.Functionalize:
735
+ import torch._dispatch.python as pydispatch
736
+
737
+ if pydispatch.CROSSREF_FUNCTIONALIZE:
738
+ handler = pydispatch.make_crossref_functionalize(self, final_key)
739
+ if cache_result:
740
+ self._dispatch_cache[key] = handler
741
+ add_cached_op(self)
742
+ return handler
743
+
744
+ # print(self, key, final_key)
745
+ r = self.py_kernels.get(final_key, final_key)
746
+ if cache_result:
747
+ self._dispatch_cache[key] = r
748
+ add_cached_op(self)
749
+ return r
750
+
751
+ def name(self):
752
+ return self._name
753
+
754
+ @property
755
+ def overloadpacket(self):
756
+ return self._overloadpacket
757
+
758
+ @property
759
+ def op(self):
760
+ return self._op
761
+
762
+ @property
763
+ def tags(self):
764
+ return self._tags
765
+
766
+ # TODO: add more methods to expose information about input and output arguments
767
+
768
+
769
+ # The OpOverloadPacket class contains a pointer to a base unresolved operator that doesn't correspond to a specific operator
770
+ # You can obtain an OpOverload object through attribute query.
771
+ class OpOverloadPacket:
772
+ def __init__(self, qualified_op_name, op_name, op, overload_names):
773
+ # These attributes are accessible on the object through the properties
774
+ # defined below but are immutable
775
+ self._qualified_op_name = qualified_op_name
776
+ self.__name__ = op_name
777
+ self._op = op
778
+ self._overload_names = overload_names
779
+ self._dir = []
780
+
781
+ # __deepcopy__ is a no-op since the OpOverloadPacket object is immutable and must be unique for a given op.
782
+ def __deepcopy__(self, memo=None):
783
+ return self
784
+
785
+ def __repr__(self):
786
+ return "<OpOverloadPacket(op='{}.{}')>".format(
787
+ *self._qualified_op_name.split("::")
788
+ )
789
+
790
+ def __hash__(self):
791
+ return hash(self._op)
792
+
793
+ def __str__(self):
794
+ return "{}.{}".format(*self._qualified_op_name.split("::"))
795
+
796
+ @property
797
+ def op(self):
798
+ return self._op
799
+
800
+ def __getattr__(self, key):
801
+ # It is not a valid op_name when __file__ is passed in
802
+ if key == "__file__":
803
+ return "torch.ops"
804
+
805
+ # ensure that queries for dunder attributes that do not exist on the
806
+ # opoverloadpacket but instead exist on the self._op object do not unnecessarily call
807
+ # `_get_operation_overload` (which is an expensive operation).
808
+ # This is done to prevent any potential slowdown. This list can be extended
809
+ # if there exists other attributes like `__name__` that only exist on self._op and not on the
810
+ # opoverloadpacket.
811
+ # This is ok since we are guaranteed that an overload name for an aten op can't start with '__'
812
+ try:
813
+ if key.startswith("__"):
814
+ return getattr(self._op, key)
815
+ except AttributeError:
816
+ # for consistency because it seems weird to
817
+ # throw an attribute error with a message containing
818
+ # an object name different from the one the attribute
819
+ # query was performed on.
820
+ raise AttributeError(
821
+ f"'{str(self)}' can't have an overload name beginning with '__' and the "
822
+ f"underlying op {str(self._op)} has no attribute {key} either."
823
+ ) from None
824
+
825
+ try:
826
+ # This is ok since we are guaranteed that an overload name for an aten op can't be 'default'
827
+ use_key = "" if key == "default" else key
828
+ # TODO: disallow access to overloads registered by JIT
829
+ op_, op_dk_, tags = torch._C._get_operation_overload(
830
+ self._qualified_op_name, use_key
831
+ )
832
+ schema = torch._C._get_schema(self._qualified_op_name, use_key)
833
+ overload = OpOverload(self, op_, op_dk_, schema, tags)
834
+ # cache the overload object
835
+ setattr(self, key, overload)
836
+ self._dir.append(key)
837
+ return overload
838
+ except RuntimeError:
839
+ raise AttributeError(
840
+ f"The underlying op of '{str(self)}' has no overload name '{key}'"
841
+ ) from None
842
+
843
+ def __iter__(self):
844
+ return iter(self._dir)
845
+
846
+ def __call__(self_, *args, **kwargs): # noqa: B902
847
+ # use `self_` to avoid a naming collision with aten ops arguments that
848
+ # are named "self". This way, all the aten ops can be called by kwargs.
849
+
850
+ # overloading __call__ to ensure torch.ops.foo.bar()
851
+ # is still callable from JIT
852
+ # We save the function ptr as the `op` attribute on
853
+ # OpOverloadPacket to access it here.
854
+ return self_._op(*args, **(kwargs or {}))
855
+
856
+ # TODO: use this to make a __dir__
857
+ def overloads(self):
858
+ return [n if n else "default" for n in self._overload_names]
859
+
860
+
861
+ # Resolution of torch.fn is different from torch.ops.aten.fn:
862
+ # torch.fn uses the Python argparser, matches with the
863
+ # appropriate schema, and calls into the unboxed version of the method
864
+ # torch.ops.aten.fn resolution is done via the mechanism defined in JIT.
865
+ # JIT creates a stack of all the overloads and then tries to match the
866
+ # correct one at runtime and always calls into the boxed version of the method
867
+ # Autograd codegen creates VariableType, TracerType,
868
+ # inplace or view type and python bindings.
869
+ # Aten codegen generates tensor methods for the tensor class.
870
+
871
+ # _OpNamespace is a subclass of ModuleType because TorchScript
872
+ # allows attribute lookups on modules only. Since we want torch.ops.foo.bar()
873
+ # to work from script, we need to ensure ops and foo are modules
874
+
875
+
876
+ class _OpNamespace(types.ModuleType):
877
+ """
878
+ An op namespace to dynamically bind Operators into Python.
879
+
880
+ Say a user has created a custom Operator called "my_namespace::my_op". To
881
+ call this op, the user will write torch.ops.my_namespace.my_op(...).
882
+ At startup, this operation will not yet be bound into Python. Instead, the
883
+ following sequence of magic tricks will occur:
884
+ 1. `torch.ops.my_namespace` will invoke the `__getattr__` magic method
885
+ on the `torch.ops` object, which will create a new `_OpNamespace`
886
+ object called `my_namespace` and set it as an attribute on the `ops`
887
+ object.
888
+ 2. `torch.ops.my_namespace.my_op` will then invoke `__getattr__` on
889
+ the `my_namespace` object, which will retrieve the operation via
890
+ `torch.get_operation`, a function bound from C++, and then in a similar
891
+ fashion bind this new object onto the `my_namespace` object.
892
+ 3. `torch.ops.my_namespace.my_op(...)` then calls this new operation
893
+ and subsequent accesses will incur no further lookup (the namespace and
894
+ operation will already exist).
895
+ """
896
+
897
+ def __init__(self, name):
898
+ super().__init__("torch.ops." + name)
899
+ self.name = name
900
+ self._dir = []
901
+
902
+ def __iter__(self):
903
+ return iter(self._dir)
904
+
905
+ def __getattr__(self, op_name):
906
+ # It is not a valid op_name when __file__ is passed in
907
+ if op_name == "__file__":
908
+ return "torch.ops"
909
+ elif op_name in ["__origin__", "__self__"]:
910
+ raise AttributeError(
911
+ f"Invalid attribute '{op_name}' for '_OpNamespace' '{self.name}'"
912
+ )
913
+
914
+ # Get the op `my_namespace::my_op` if available. This will also check
915
+ # for overloads and raise an exception if there are more than one.
916
+ namespace_name = self.name
917
+ qualified_op_name = f"{namespace_name}::{op_name}"
918
+ try:
919
+ op, overload_names = torch._C._jit_get_operation(qualified_op_name)
920
+ if op is None:
921
+ raise AttributeError(
922
+ f"'_OpNamespace' '{self.name}' object has no attribute '{op_name}'"
923
+ )
924
+ except RuntimeError as e:
925
+ # Turn this into AttributeError so getattr(obj, key, default)
926
+ # works (this is called by TorchScript with __origin__)
927
+ raise AttributeError(
928
+ f"'_OpNamespace' '{self.name}' object has no attribute '{op_name}'"
929
+ ) from e
930
+
931
+ # let the script frontend know that op is identical to the builtin op
932
+ # with qualified_op_name
933
+ torch.jit._builtins._register_builtin(op, qualified_op_name)
934
+ op.__module__ = self.__module__ + "." + namespace_name
935
+ opoverloadpacket = OpOverloadPacket(
936
+ qualified_op_name, op_name, op, overload_names
937
+ )
938
+ opoverloadpacket.__module__ = self.__module__ + "." + namespace_name
939
+ # cache the opoverloadpacket to ensure that each op corresponds to
940
+ # a unique OpOverloadPacket object
941
+ setattr(self, op_name, opoverloadpacket)
942
+ self._dir.append(op_name)
943
+ return opoverloadpacket
944
+
945
+
946
+ class _PyOpNamespace(_OpNamespace):
947
+ def __init__(self, name, ops):
948
+ super().__init__(name)
949
+ self._ops = ops
950
+
951
+ def __getattr__(self, name):
952
+ # Following _OpNamespace.__getattr__, we cache the op on the _PyOpNamespace object.
953
+ op = self._ops.get(name, None)
954
+ if op is None:
955
+ raise AttributeError(
956
+ f"'_PyOpNamespace' '{self.name}' object has no attribute '{name}'"
957
+ )
958
+ setattr(self, name, op)
959
+ return op
960
+
961
+
962
+ class _Ops(types.ModuleType):
963
+ __file__ = "_ops.py"
964
+
965
+ def __init__(self):
966
+ super().__init__("torch.ops")
967
+ self.loaded_libraries = set()
968
+ self._higher_order_op_namespace = _PyOpNamespace(
969
+ "torch.ops.higher_order", _higher_order_ops
970
+ )
971
+ self._dir = []
972
+
973
+ def __getattr__(self, name):
974
+ # Check if the name is a HigherOrderOperator
975
+ if name == "higher_order":
976
+ return self._higher_order_op_namespace
977
+
978
+ # Here we are creating `torch.ops.my_namespace`
979
+ namespace = _OpNamespace(name)
980
+ setattr(self, name, namespace)
981
+ self._dir.append(name)
982
+ return namespace
983
+
984
+ def __iter__(self):
985
+ return iter(self._dir)
986
+
987
+ def import_module(self, module):
988
+ """
989
+ Imports a Python module that has torch.library registrations.
990
+
991
+ Generally, to extend PyTorch with custom operators, a user will
992
+ create a Python module whose import triggers registration of
993
+ the custom operators via a torch.ops.load_library call or a call
994
+ to one or more torch.library.* APIs.
995
+
996
+ It is unexpected for Python modules to have side effects, so some
997
+ linters and formatters will complain. Use this API to import Python
998
+ modules that contain these torch.library side effects.
999
+
1000
+ Args:
1001
+ module (str): The name of the Python module to import
1002
+
1003
+ """
1004
+ importlib.import_module(module)
1005
+
1006
+ def load_library(self, path):
1007
+ """
1008
+ Loads a shared library from the given path into the current process.
1009
+
1010
+ The library being loaded may run global initialization code to register
1011
+ custom operators with the PyTorch JIT runtime. This allows dynamically
1012
+ loading custom operators. For this, you should compile your operator
1013
+ and the static registration code into a shared library object, and then
1014
+ call ``torch.ops.load_library('path/to/libcustom.so')`` to load the
1015
+ shared object.
1016
+
1017
+ After the library is loaded, it is added to the
1018
+ ``torch.ops.loaded_libraries`` attribute, a set that may be inspected
1019
+ for the paths of all libraries loaded using this function.
1020
+
1021
+ Args:
1022
+ path (str): A path to a shared library to load.
1023
+ """
1024
+ if torch._running_with_deploy():
1025
+ return
1026
+
1027
+ path = _utils_internal.resolve_library_path(path)
1028
+ with dl_open_guard():
1029
+ # Import the shared library into the process, thus running its
1030
+ # static (global) initialization code in order to register custom
1031
+ # operators with the JIT.
1032
+ ctypes.CDLL(path)
1033
+ self.loaded_libraries.add(path)
1034
+
1035
+
1036
+ # The ops "namespace"
1037
+ ops = _Ops()
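A brief doctest-style sketch of how the lazy namespaces above resolve; the op names are illustrative, while the reprs follow the ``__repr__`` methods defined in this file:

>>> # xdoctest: +SKIP
>>> torch.ops.aten.add                             # _Ops.__getattr__ -> _OpNamespace('aten') -> OpOverloadPacket
<OpOverloadPacket(op='aten.add')>
>>> torch.ops.aten.add.Tensor                      # OpOverloadPacket.__getattr__ -> OpOverload
<OpOverload(op='aten.add', overload='Tensor')>
>>> torch.ops.load_library('path/to/libcustom.so')  # loads a shared library and registers its custom ops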
venv/lib/python3.10/site-packages/torch/_python_dispatcher.py ADDED
@@ -0,0 +1,181 @@
1
+ import re
2
+
3
+ import torch._C as C
4
+
5
+
6
+ """
7
+ The PythonDispatcher class is a thin Python binding to the C++ dispatcher and is
8
+ designed to show how dispatcher precomputation works. In particular,
9
+ it shows, for a certain op `foo`, what the computed dispatch table looks
10
+ like after users register their kernels to certain dispatch keys.
11
+
12
+ In the real C++ dispatcher we support many dispatch keys for different
13
+ functionalities. For simplicity PythonDispatcher only supports dispatch
14
+ keys for a single example of each use case. These use cases are listed below:
15
+
16
+ - CPU/AutogradCPU: represents in-tree backends for which we usually have dedicated inference &
17
+ autograd kernels in the pytorch core library.
18
+ E.g. CPU, CUDA
19
+ - FPGA/AutogradOther: represents in-tree backends for which we usually have backend-specific
20
+ inference kernels, but they share the same autograd kernel specified in AutogradOther.
21
+ E.g. FPGA, SparseCsrCPU
22
+ - XLA/AutogradXLA: represents out-of-tree backends for which we don't have either inference or autograd
23
+ kernels defined in the pytorch core library. The backend owner is responsible for registering both
24
+ inference & autograd kernels in their extensions (e.g. torch-xla) for the operators they support.
25
+ E.g. XLA, XPU, MPS
26
+ - CompositeExplicitAutograd: alias key mapped to inference kernels of all backends like CPU, CUDA, XLA etc.
27
+ Kernels registered to this key MUST work for inference for all backends.
28
+ - Autograd: alias key mapped to autograd of all backends like AutogradCPU, AutogradXLA, AutogradOther.
29
+ Kernels registered to this key MUST work for autograd for all backends.
30
+ - CompositeImplicitAutograd: alias key CompositeImplicitAutograd = CompositeExplicitAutograd + Autograd
31
+ Kernels registered to this key MUST work for both inference + autograd for all backends.
32
+
33
+ Note we only allow registrations to alias keys inside the pytorch core library. E.g.
34
+ you shouldn't register a CompositeImplicitAutograd or CompositeExplicitAutograd
35
+ kernel from the torch-xla extension; instead you should upstream the kernel into
36
+ the pytorch/pytorch repo so that it's available for all backends and continuously
37
+ tested even without the extension.
38
+
39
+ Usage:
40
+ dispatcher = PythonDispatcher()
41
+ dispatcher.register(["CPU", "XLA", "CompositeImplicitAutograd"])
42
+ print(dispatcher.dispatchTable()) # This tells you exactly which kernel is used for certain backend.
43
+ # For more debugging information
44
+ # print(dispatcher.keys())
45
+ # print(dispatcher.registrations())
46
+ # print(dispatcher.rawRegistrations())
47
+ # print(dispatcher.rawDispatchTable())
48
+ PythonDispatcher calls the C++ dispatcher under the hood to precompute the dispatch table.
49
+ This file only provides the simplified API for developers; relevant test code is located in
50
+ test/test_dispatch.py
51
+ """
52
+
53
+
54
+ class PythonDispatcher:
55
+ namespace = "__test__"
56
+ name = "foo"
57
+ # fmt: off
58
+ runtime_keys = [
59
+ "CPU", "AutogradCPU",
60
+ "FPGA", "AutogradOther",
61
+ "XLA", "AutogradXLA",
62
+ "Lazy", "AutogradLazy",
63
+ ]
64
+ # fmt: on
65
+ alias_keys = [
66
+ "CompositeExplicitAutograd",
67
+ "Autograd",
68
+ "CompositeImplicitAutograd",
69
+ ]
70
+ supported_keys = runtime_keys + alias_keys
71
+
72
+ def __init__(self):
73
+ C._dispatch_check_invariants(self.name) # type: ignore[attr-defined]
74
+ self.ref = C._dispatch_library("FRAGMENT", self.namespace, "")
75
+ self.ref.def_("foo(Tensor x) -> Tensor")
76
+
77
+ """
78
+ Returns a list of dispatch keys supported by PythonDispatcher.
79
+ You can register kernels to these keys.
80
+ """
81
+
82
+ def keys(self):
83
+ return self.supported_keys
84
+
85
+ """
86
+ Register kernels to the target dispatchKeys.
87
+ dispatchKeys(list[str]): a list of dispatch keys that you want to register
88
+ your own kernel to. Note that you don't need to write the kernel yourself in
89
+ this PythonDispatcher. E.g. for the CPU key, a kernel (e.g. fn_CPU) is
90
+ automatically generated and registered.
91
+ """
92
+
93
+ def register(self, dispatchKeys):
94
+ # Overriding is not supported and triggers a warning in the C++ dispatcher.
95
+ if len(set(dispatchKeys)) != len(dispatchKeys):
96
+ raise RuntimeError(
97
+ f"Overriden is not allowed but found duplicates in {dispatchKeys}."
98
+ )
99
+ # We currently forbid this in codegen instead of C++ dispatcher.
100
+ if (
101
+ "CompositeImplicitAutograd" in dispatchKeys
102
+ and "CompositeExplicitAutograd" in dispatchKeys
103
+ ):
104
+ raise RuntimeError(
105
+ "Registration to both CompositeImplicitAutograd and CompositeExplicitAutograd is not allowed."
106
+ )
107
+ for key in dispatchKeys:
108
+ if key not in self.supported_keys:
109
+ raise RuntimeError(
110
+ f"{key} is not supported, please select a dispatch key in {self.supported_keys}."
111
+ )
112
+ self.ref.impl_t_t("foo", dispatch=key, debug="fn_" + key)
113
+
114
+ """
115
+ Helper function to format (key, kernel).
116
+ """
117
+
118
+ def _format_line(self, key, kernel):
119
+ return f"{key:<15} {kernel}\n"
120
+
121
+ """
122
+ Helper function to print a table header.
123
+ """
124
+
125
+ def _format_header(self, header):
126
+ s = f"""
127
+ {header}
128
+ """
129
+ s += self._format_line("key", "kernel")
130
+ s += "---------------------------\n"
131
+ return s
132
+
133
+ """
134
+ Returns raw output of all registration info for debugging only.
135
+ Use registrations() for a simplified version.
136
+ """
137
+
138
+ def rawRegistrations(self):
139
+ return C._dispatch_dump(f"{self.namespace}::{self.name}") # type: ignore[attr-defined]
140
+
141
+ """
142
+ Returns raw output of computed dispatch table for debugging only.
143
+ Use dispatchTable() for a simplified version.
144
+ """
145
+
146
+ def rawDispatchTable(self):
147
+ return C._dispatch_dump_table(f"{self.namespace}::{self.name}") # type: ignore[attr-defined]
148
+
149
+ """
150
+ Returns a table(str) including all the registrations from users.
151
+ Note this includes registrations to both runtime keys and alias keys.
152
+ """
153
+
154
+ def registrations(self):
155
+ output = self._format_header("Registered Kernels")
156
+ state = self.rawRegistrations()
157
+ state_entries = state.split("\n")
158
+ for line in state_entries:
159
+ first = line.split(":")[0]
160
+ if any(first.startswith(k) for k in self.supported_keys):
161
+ kernel = line.split("::")[0].split(" ")[1]
162
+ output += self._format_line(first, kernel)
163
+ return output
164
+
165
+ """
166
+ Returns the computed dispatch table (str). Note this only includes
167
+ runtime keys; registrations to alias keys have been decoded to their
168
+ mapped runtime keys.
169
+ """
170
+
171
+ def dispatchTable(self):
172
+ output = self._format_header("Computed Dispatch Table")
173
+ table = self.rawDispatchTable()
174
+ table_entries = table.split("\n")
175
+ regex = re.compile(r"registered at .*FallbackKernel\.cpp.*(\[)")
176
+ for line in table_entries:
177
+ k = line.split(":")[0]
178
+ if k in self.runtime_keys:
179
+ entry = regex.sub("[", line)
180
+ output += self._format_line(k, entry.split(": ")[1])
181
+ return output
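A hedged end-to-end sketch following the Usage section of the module docstring above; the exact table text is not reproduced, only the way kernels are queried:

>>> # xdoctest: +SKIP
>>> dispatcher = PythonDispatcher()
>>> dispatcher.register(["CPU", "XLA", "CompositeImplicitAutograd"])
>>> print(dispatcher.dispatchTable())              # shows, per runtime key, which kernel is used
>>> print(dispatcher.registrations())              # shows the raw registrations, including alias keys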