applied-ai-018 committed
Commit 956e414 · verified · 1 parent: 6ca79e0

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/pathvalidate/__init__.py +81 -0
  2. env-llmeval/lib/python3.10/site-packages/pathvalidate/__version__.py +6 -0
  3. env-llmeval/lib/python3.10/site-packages/pathvalidate/_base.py +237 -0
  4. env-llmeval/lib/python3.10/site-packages/pathvalidate/_const.py +40 -0
  5. env-llmeval/lib/python3.10/site-packages/pathvalidate/_symbol.py +92 -0
  6. env-llmeval/lib/python3.10/site-packages/pathvalidate/_types.py +8 -0
  7. env-llmeval/lib/python3.10/site-packages/pathvalidate/argparse.py +47 -0
  8. env-llmeval/lib/python3.10/site-packages/pathvalidate/click.py +48 -0
  9. env-llmeval/lib/python3.10/site-packages/pathvalidate/error.py +253 -0
  10. env-llmeval/lib/python3.10/site-packages/pathvalidate/handler.py +139 -0
  11. env-llmeval/lib/python3.10/site-packages/pathvalidate/py.typed +0 -0
  12. env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/America/Argentina/Buenos_Aires +0 -0
  13. env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/America/Argentina/ComodRivadavia +0 -0
  14. env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/America/Argentina/Mendoza +0 -0
  15. env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/America/__pycache__/__init__.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Backtrace.h +2 -0
  17. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/IListRef.h +631 -0
  18. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/LegacyTypeDispatch.h +111 -0
  19. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/QuantizerBase.h +83 -0
  20. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/VariableHooksInterface.h +75 -0
  21. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/dynamic_type.h +239 -0
  22. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/jit_type_base.h +719 -0
  23. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/adaption.h +83 -0
  24. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_allowlist.h +199 -0
  25. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/rref_interface.h +40 -0
  26. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/DeviceThreadHandles.h +151 -0
  27. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/xpu/XPUContext.h +20 -0
  28. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/xpu/XPUDevice.h +13 -0
  29. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/xpu/XPUEvent.h +132 -0
  30. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/xpu/XPUGeneratorImpl.h +39 -0
  31. llmeval-env/lib/python3.10/site-packages/torch/include/ATen/xpu/detail/XPUHooks.h +23 -0
  32. llmeval-env/lib/python3.10/site-packages/torch/include/c10/macros/Export.h +160 -0
  33. llmeval-env/lib/python3.10/site-packages/torch/include/c10/macros/cmake_macros.h +14 -0
  34. llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/AbortHandler.h +81 -0
  35. llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/ApproximateClock.h +115 -0
  36. llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/BFloat16-math.h +287 -0
  37. llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Backtrace.h +17 -0
  38. llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Bitset.h +116 -0
  39. llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/C++17.h +166 -0
  40. llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/DeadlockDetection.h +48 -0
  41. llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Exception.h +711 -0
  42. llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/ExclusivelyOwnedTensorTraits.h +75 -0
  43. llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2.h +143 -0
  44. llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2fnuz.h +135 -0
  45. llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/IdWrapper.h +77 -0
  46. llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/LeftRight.h +223 -0
  47. llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Optional.h +48 -0
  48. llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/OptionalArrayRef.h +236 -0
  49. llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Registry.h +326 -0
  50. llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/SmallBuffer.h +87 -0
env-llmeval/lib/python3.10/site-packages/pathvalidate/__init__.py ADDED
@@ -0,0 +1,81 @@
"""
.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
"""

from .__version__ import __author__, __copyright__, __email__, __license__, __version__
from ._base import AbstractSanitizer, AbstractValidator
from ._common import (
    ascii_symbols,
    normalize_platform,
    replace_ansi_escape,
    replace_unprintable_char,
    unprintable_ascii_chars,
    validate_pathtype,
    validate_unprintable_char,
)
from ._const import Platform
from ._filename import (
    FileNameSanitizer,
    FileNameValidator,
    is_valid_filename,
    sanitize_filename,
    validate_filename,
)
from ._filepath import (
    FilePathSanitizer,
    FilePathValidator,
    is_valid_filepath,
    sanitize_filepath,
    validate_filepath,
)
from ._ltsv import sanitize_ltsv_label, validate_ltsv_label
from ._symbol import replace_symbol, validate_symbol
from .error import (
    ErrorReason,
    InvalidCharError,
    InvalidReservedNameError,
    NullNameError,
    ReservedNameError,
    ValidationError,
    ValidReservedNameError,
)


__all__ = (
    "__author__",
    "__copyright__",
    "__email__",
    "__license__",
    "__version__",
    "AbstractSanitizer",
    "AbstractValidator",
    "Platform",
    "ascii_symbols",
    "normalize_platform",
    "replace_ansi_escape",
    "replace_unprintable_char",
    "unprintable_ascii_chars",
    "validate_pathtype",
    "validate_unprintable_char",
    "FileNameSanitizer",
    "FileNameValidator",
    "is_valid_filename",
    "sanitize_filename",
    "validate_filename",
    "FilePathSanitizer",
    "FilePathValidator",
    "is_valid_filepath",
    "sanitize_filepath",
    "validate_filepath",
    "sanitize_ltsv_label",
    "validate_ltsv_label",
    "replace_symbol",
    "validate_symbol",
    "ErrorReason",
    "InvalidCharError",
    "InvalidReservedNameError",
    "NullNameError",
    "ReservedNameError",
    "ValidationError",
    "ValidReservedNameError",
)
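
The module above only re-exports the package's public API. For orientation, a minimal usage sketch of the two most common entry points, sanitize_filename and validate_filename (behavior assumed from pathvalidate 3.2.0 as pinned in __version__.py below; printed outputs are illustrative):

    from pathvalidate import ValidationError, sanitize_filename, validate_filename

    # Sanitizing removes characters that are invalid in file names.
    print(sanitize_filename("fi:l*e/p\"a?t>h|.t<xt"))  # expected: "filepath.txt"

    # Validating raises a ValidationError instead of rewriting the name.
    try:
        validate_filename("fi:l*e/p\"a?t>h|.t<xt")
    except ValidationError as e:
        print(f"rejected: {e.reason}")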
env-llmeval/lib/python3.10/site-packages/pathvalidate/__version__.py ADDED
@@ -0,0 +1,6 @@
__author__ = "Tsuyoshi Hombashi"
__copyright__ = f"Copyright 2016, {__author__}"
__license__ = "MIT License"
__version__ = "3.2.0"
__maintainer__ = __author__
__email__ = "[email protected]"
env-llmeval/lib/python3.10/site-packages/pathvalidate/_base.py ADDED
@@ -0,0 +1,237 @@
"""
.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
"""

import abc
import os
import sys
from typing import ClassVar, Optional, Sequence, Tuple

from ._common import normalize_platform, unprintable_ascii_chars
from ._const import DEFAULT_MIN_LEN, Platform
from ._types import PathType, PlatformType
from .error import ReservedNameError, ValidationError
from .handler import NullValueHandler, ReservedNameHandler, ValidationErrorHandler


class BaseFile:
    _INVALID_PATH_CHARS: ClassVar[str] = "".join(unprintable_ascii_chars)
    _INVALID_FILENAME_CHARS: ClassVar[str] = _INVALID_PATH_CHARS + "/"
    _INVALID_WIN_PATH_CHARS: ClassVar[str] = _INVALID_PATH_CHARS + ':*?"<>|\t\n\r\x0b\x0c'
    _INVALID_WIN_FILENAME_CHARS: ClassVar[str] = (
        _INVALID_FILENAME_CHARS + _INVALID_WIN_PATH_CHARS + "\\"
    )

    @property
    def platform(self) -> Platform:
        return self.__platform

    @property
    def reserved_keywords(self) -> Tuple[str, ...]:
        return self._additional_reserved_names

    @property
    def max_len(self) -> int:
        return self._max_len

    def __init__(
        self,
        max_len: int,
        fs_encoding: Optional[str],
        additional_reserved_names: Optional[Sequence[str]] = None,
        platform_max_len: Optional[int] = None,
        platform: Optional[PlatformType] = None,
    ) -> None:
        if additional_reserved_names is None:
            additional_reserved_names = tuple()
        self._additional_reserved_names = tuple(n.upper() for n in additional_reserved_names)

        self.__platform = normalize_platform(platform)

        if platform_max_len is None:
            platform_max_len = self._get_default_max_path_len()

        if max_len <= 0:
            self._max_len = platform_max_len
        else:
            self._max_len = max_len

        self._max_len = min(self._max_len, platform_max_len)

        if fs_encoding:
            self._fs_encoding = fs_encoding
        else:
            self._fs_encoding = sys.getfilesystemencoding()

    def _is_posix(self) -> bool:
        return self.platform == Platform.POSIX

    def _is_universal(self) -> bool:
        return self.platform == Platform.UNIVERSAL

    def _is_linux(self, include_universal: bool = False) -> bool:
        if include_universal:
            return self.platform in (Platform.UNIVERSAL, Platform.LINUX)

        return self.platform == Platform.LINUX

    def _is_windows(self, include_universal: bool = False) -> bool:
        if include_universal:
            return self.platform in (Platform.UNIVERSAL, Platform.WINDOWS)

        return self.platform == Platform.WINDOWS

    def _is_macos(self, include_universal: bool = False) -> bool:
        if include_universal:
            return self.platform in (Platform.UNIVERSAL, Platform.MACOS)

        return self.platform == Platform.MACOS

    def _get_default_max_path_len(self) -> int:
        if self._is_linux():
            return 4096

        if self._is_windows():
            return 260

        if self._is_posix() or self._is_macos():
            return 1024

        return 260  # universal


class AbstractValidator(BaseFile, metaclass=abc.ABCMeta):
    def __init__(
        self,
        max_len: int,
        fs_encoding: Optional[str],
        check_reserved: bool,
        additional_reserved_names: Optional[Sequence[str]] = None,
        platform_max_len: Optional[int] = None,
        platform: Optional[PlatformType] = None,
    ) -> None:
        self._check_reserved = check_reserved

        super().__init__(
            max_len,
            fs_encoding,
            additional_reserved_names=additional_reserved_names,
            platform_max_len=platform_max_len,
            platform=platform,
        )

    @abc.abstractproperty
    def min_len(self) -> int:  # pragma: no cover
        pass

    @abc.abstractmethod
    def validate(self, value: PathType) -> None:  # pragma: no cover
        pass

    def is_valid(self, value: PathType) -> bool:
        try:
            self.validate(value)
        except (TypeError, ValidationError):
            return False

        return True

    def _is_reserved_keyword(self, value: str) -> bool:
        return value in self.reserved_keywords


class AbstractSanitizer(BaseFile, metaclass=abc.ABCMeta):
    def __init__(
        self,
        validator: AbstractValidator,
        max_len: int,
        fs_encoding: Optional[str],
        validate_after_sanitize: bool,
        null_value_handler: Optional[ValidationErrorHandler] = None,
        reserved_name_handler: Optional[ValidationErrorHandler] = None,
        additional_reserved_names: Optional[Sequence[str]] = None,
        platform_max_len: Optional[int] = None,
        platform: Optional[PlatformType] = None,
    ) -> None:
        super().__init__(
            max_len=max_len,
            fs_encoding=fs_encoding,
            additional_reserved_names=additional_reserved_names,
            platform_max_len=platform_max_len,
            platform=platform,
        )

        if null_value_handler is None:
            null_value_handler = NullValueHandler.return_null_string
        self._null_value_handler = null_value_handler

        if reserved_name_handler is None:
            reserved_name_handler = ReservedNameHandler.add_trailing_underscore
        self._reserved_name_handler = reserved_name_handler

        self._validate_after_sanitize = validate_after_sanitize

        self._validator = validator

    @abc.abstractmethod
    def sanitize(self, value: PathType, replacement_text: str = "") -> PathType:  # pragma: no cover
        pass


class BaseValidator(AbstractValidator):
    @property
    def min_len(self) -> int:
        return self._min_len

    def __init__(
        self,
        min_len: int,
        max_len: int,
        fs_encoding: Optional[str],
        check_reserved: bool,
        additional_reserved_names: Optional[Sequence[str]] = None,
        platform_max_len: Optional[int] = None,
        platform: Optional[PlatformType] = None,
    ) -> None:
        if min_len <= 0:
            min_len = DEFAULT_MIN_LEN
        self._min_len = max(min_len, 1)

        super().__init__(
            max_len=max_len,
            fs_encoding=fs_encoding,
            check_reserved=check_reserved,
            additional_reserved_names=additional_reserved_names,
            platform_max_len=platform_max_len,
            platform=platform,
        )

        self._validate_max_len()

    def _validate_reserved_keywords(self, name: str) -> None:
        if not self._check_reserved:
            return

        root_name = self.__extract_root_name(name)
        base_name = os.path.basename(name).upper()

        if self._is_reserved_keyword(root_name.upper()) or self._is_reserved_keyword(
            base_name.upper()
        ):
            raise ReservedNameError(
                f"'{root_name}' is a reserved name",
                reusable_name=False,
                reserved_name=root_name,
                platform=self.platform,
            )

    def _validate_max_len(self) -> None:
        if self.max_len < 1:
            raise ValueError("max_len must be greater or equal to one")

        if self.min_len > self.max_len:
            raise ValueError("min_len must be lower than max_len")

    @staticmethod
    def __extract_root_name(path: str) -> str:
        return os.path.splitext(os.path.basename(path))[0]
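
BaseFile.__init__ above resolves the effective max_len: a non-positive request falls back to the platform default returned by _get_default_max_path_len() (4096 for Linux, 260 for Windows, 1024 for POSIX/macOS, 260 for universal), and any explicit request is clamped to that default. A minimal sketch exercising this logic via a hypothetical no-op subclass (BaseValidator leaves validate() abstract):

    from pathvalidate._base import BaseValidator

    class _NoopValidator(BaseValidator):
        """Hypothetical subclass that accepts everything; used only to probe length handling."""

        def validate(self, value) -> None:
            return None

    # max_len <= 0 falls back to the platform default path length.
    v = _NoopValidator(min_len=-1, max_len=-1, fs_encoding=None, check_reserved=True, platform="linux")
    print(v.max_len)  # expected: 4096
    print(v.min_len)  # expected: 1 (DEFAULT_MIN_LEN)

    # An oversized request is clamped to the platform default.
    w = _NoopValidator(min_len=1, max_len=10_000, fs_encoding=None, check_reserved=True, platform="windows")
    print(w.max_len)  # expected: 260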
env-llmeval/lib/python3.10/site-packages/pathvalidate/_const.py ADDED
@@ -0,0 +1,40 @@
import enum


DEFAULT_MIN_LEN = 1
INVALID_CHAR_ERR_MSG_TMPL = "invalids=({invalid}), value={value}"


_NTFS_RESERVED_FILE_NAMES = (
    "$Mft",
    "$MftMirr",
    "$LogFile",
    "$Volume",
    "$AttrDef",
    "$Bitmap",
    "$Boot",
    "$BadClus",
    "$Secure",
    "$Upcase",
    "$Extend",
    "$Quota",
    "$ObjId",
    "$Reparse",
)  # Only in root directory


@enum.unique
class Platform(enum.Enum):
    """
    Platform specifier enumeration.
    """

    #: POSIX compatible platform.
    POSIX = "POSIX"

    #: platform independent. note that absolute paths cannot specify this.
    UNIVERSAL = "universal"

    LINUX = "Linux"
    WINDOWS = "Windows"
    MACOS = "macOS"
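
The Platform enum above is the canonical platform specifier; the public functions also accept the equivalent strings, which normalize_platform (defined in _common.py, outside this 50-file view) maps onto these members. A short sketch (outputs illustrative):

    from pathvalidate import Platform, sanitize_filepath

    print(Platform.WINDOWS.value)  # "Windows"

    # The enum and its string form are interchangeable as the platform argument.
    print(sanitize_filepath("a\\b:c", platform=Platform.WINDOWS))
    print(sanitize_filepath("a\\b:c", platform="windows"))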
env-llmeval/lib/python3.10/site-packages/pathvalidate/_symbol.py ADDED
@@ -0,0 +1,92 @@
"""
.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
"""

import re
from typing import Sequence

from ._common import ascii_symbols, to_str, unprintable_ascii_chars
from .error import InvalidCharError


__RE_SYMBOL = re.compile(
    "[{}]".format(re.escape("".join(ascii_symbols + unprintable_ascii_chars))), re.UNICODE
)


def validate_symbol(text: str) -> None:
    """
    Verifying whether symbol(s) included in the ``text`` or not.

    Args:
        text:
            Input text to validate.

    Raises:
        ValidationError (ErrorReason.INVALID_CHARACTER):
            If symbol(s) included in the ``text``.
    """

    match_list = __RE_SYMBOL.findall(to_str(text))
    if match_list:
        raise InvalidCharError(f"invalid symbols found: {match_list}")


def replace_symbol(
    text: str,
    replacement_text: str = "",
    exclude_symbols: Sequence[str] = [],
    is_replace_consecutive_chars: bool = False,
    is_strip: bool = False,
) -> str:
    """
    Replace all of the symbols in the ``text``.

    Args:
        text:
            Input text.
        replacement_text:
            Replacement text.
        exclude_symbols:
            Symbols that exclude from the replacement.
        is_replace_consecutive_chars:
            If |True|, replace consecutive multiple ``replacement_text`` characters
            to a single character.
        is_strip:
            If |True|, strip ``replacement_text`` from the beginning/end of the replacement text.

    Returns:
        A replacement string.

    Example:

        :ref:`example-sanitize-symbol`
    """

    if exclude_symbols:
        regexp = re.compile(
            "[{}]".format(
                re.escape(
                    "".join(set(ascii_symbols + unprintable_ascii_chars) - set(exclude_symbols))
                )
            ),
            re.UNICODE,
        )
    else:
        regexp = __RE_SYMBOL

    try:
        new_text = regexp.sub(replacement_text, to_str(text))
    except TypeError:
        raise TypeError("text must be a string")

    if not replacement_text:
        return new_text

    if is_replace_consecutive_chars:
        new_text = re.sub(f"{re.escape(replacement_text)}+", replacement_text, new_text)

    if is_strip:
        new_text = new_text.strip(replacement_text)

    return new_text
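
A minimal sketch of the two functions defined above (outputs follow from the regular expressions built over ascii_symbols; treat them as illustrative):

    from pathvalidate import replace_symbol, validate_symbol
    from pathvalidate.error import ValidationError

    # Symbols are dropped by default, or swapped for replacement_text.
    print(replace_symbol("_abc-def!"))                      # expected: "abcdef"
    print(replace_symbol("_abc-def!", "_", is_strip=True))  # expected: "abc_def"

    # validate_symbol raises InvalidCharError (a ValidationError) on any symbol.
    try:
        validate_symbol("a/b")
    except ValidationError as e:
        print(e.reason)  # [PV1100] invalid characters found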
env-llmeval/lib/python3.10/site-packages/pathvalidate/_types.py ADDED
@@ -0,0 +1,8 @@
from pathlib import Path
from typing import TypeVar

from ._const import Platform


PathType = TypeVar("PathType", str, Path)
PlatformType = TypeVar("PlatformType", str, Platform)
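
These constrained TypeVars let annotations tie a function's return type to its argument type while restricting the argument to str or pathlib.Path (or str/Platform). A hypothetical illustration:

    from pathlib import Path

    from pathvalidate._types import PathType

    def normalize(value: PathType) -> PathType:
        """Hypothetical helper: accepts str or Path and returns the same type."""
        return value

    normalize("a/b")        # checked as str -> str
    normalize(Path("a/b"))  # checked as Path -> Path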
env-llmeval/lib/python3.10/site-packages/pathvalidate/argparse.py ADDED
@@ -0,0 +1,47 @@
"""
.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
"""

from argparse import ArgumentTypeError

from ._filename import sanitize_filename, validate_filename
from ._filepath import sanitize_filepath, validate_filepath
from .error import ValidationError


def validate_filename_arg(value: str) -> str:
    if not value:
        return ""

    try:
        validate_filename(value)
    except ValidationError as e:
        raise ArgumentTypeError(e)

    return value


def validate_filepath_arg(value: str) -> str:
    if not value:
        return ""

    try:
        validate_filepath(value, platform="auto")
    except ValidationError as e:
        raise ArgumentTypeError(e)

    return value


def sanitize_filename_arg(value: str) -> str:
    if not value:
        return ""

    return sanitize_filename(value)


def sanitize_filepath_arg(value: str) -> str:
    if not value:
        return ""

    return sanitize_filepath(value, platform="auto")
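
The four helpers above plug directly into argparse as type converters; invalid input surfaces as the usual argparse usage error. A minimal sketch:

    from argparse import ArgumentParser

    from pathvalidate.argparse import sanitize_filename_arg, validate_filename_arg

    parser = ArgumentParser()
    parser.add_argument("--filename", type=validate_filename_arg)  # reject invalid names
    parser.add_argument("--out", type=sanitize_filename_arg)       # silently clean names

    args = parser.parse_args(["--filename", "report.txt", "--out", "re:po*rt.txt"])
    print(args.filename, args.out)  # expected: report.txt report.txt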
env-llmeval/lib/python3.10/site-packages/pathvalidate/click.py ADDED
@@ -0,0 +1,48 @@
"""
.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
"""

import click
from click.core import Context, Option

from ._filename import sanitize_filename, validate_filename
from ._filepath import sanitize_filepath, validate_filepath
from .error import ValidationError


def validate_filename_arg(ctx: Context, param: Option, value: str) -> str:
    if not value:
        return ""

    try:
        validate_filename(value)
    except ValidationError as e:
        raise click.BadParameter(str(e))

    return value


def validate_filepath_arg(ctx: Context, param: Option, value: str) -> str:
    if not value:
        return ""

    try:
        validate_filepath(value)
    except ValidationError as e:
        raise click.BadParameter(str(e))

    return value


def sanitize_filename_arg(ctx: Context, param: Option, value: str) -> str:
    if not value:
        return ""

    return sanitize_filename(value)


def sanitize_filepath_arg(ctx: Context, param: Option, value: str) -> str:
    if not value:
        return ""

    return sanitize_filepath(value)
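
The same helpers exist for click, shaped as option callbacks (ctx, param, value); failures become click.BadParameter usage errors. A minimal sketch:

    import click

    from pathvalidate.click import sanitize_filename_arg, validate_filename_arg

    @click.command()
    @click.option("--filename", callback=validate_filename_arg)  # abort on invalid names
    @click.option("--out", callback=sanitize_filename_arg)       # clean the value instead
    def cli(filename: str, out: str) -> None:
        click.echo(f"filename={filename} out={out}")

    if __name__ == "__main__":
        cli()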
env-llmeval/lib/python3.10/site-packages/pathvalidate/error.py ADDED
@@ -0,0 +1,253 @@
"""
.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
"""

import enum
from typing import Dict, Optional

from ._const import Platform


def _to_error_code(code: int) -> str:
    return f"PV{code:04d}"


class ErrorAttrKey:
    BYTE_COUNT = "byte_count"
    DESCRIPTION = "description"
    FS_ENCODING = "fs_encoding"
    PLATFORM = "platform"
    REASON = "reason"
    RESERVED_NAME = "reserved_name"
    REUSABLE_NAME = "reusable_name"


@enum.unique
class ErrorReason(enum.Enum):
    """
    Validation error reasons.
    """

    NULL_NAME = (_to_error_code(1001), "NULL_NAME", "the value must not be an empty")
    RESERVED_NAME = (
        _to_error_code(1002),
        "RESERVED_NAME",
        "found a reserved name by a platform",
    )
    INVALID_CHARACTER = (
        _to_error_code(1100),
        "INVALID_CHARACTER",
        "invalid characters found",
    )
    INVALID_LENGTH = (
        _to_error_code(1101),
        "INVALID_LENGTH",
        "found an invalid string length",
    )
    FOUND_ABS_PATH = (
        _to_error_code(1200),
        "FOUND_ABS_PATH",
        "found an absolute path where must be a relative path",
    )
    MALFORMED_ABS_PATH = (
        _to_error_code(1201),
        "MALFORMED_ABS_PATH",
        "found a malformed absolute path",
    )
    INVALID_AFTER_SANITIZE = (
        _to_error_code(2000),
        "INVALID_AFTER_SANITIZE",
        "found invalid value after sanitizing",
    )

    @property
    def code(self) -> str:
        """str: Error code."""
        return self.__code

    @property
    def name(self) -> str:
        """str: Error reason name."""
        return self.__name

    @property
    def description(self) -> str:
        """str: Error reason description."""
        return self.__description

    def __init__(self, code: str, name: str, description: str) -> None:
        self.__name = name
        self.__code = code
        self.__description = description

    def __str__(self) -> str:
        return f"[{self.__code}] {self.__description}"


class ValidationError(ValueError):
    """
    Exception class of validation errors.
    """

    @property
    def platform(self) -> Optional[Platform]:
        """
        :py:class:`~pathvalidate.Platform`: Platform information.
        """
        return self.__platform

    @property
    def reason(self) -> ErrorReason:
        """
        :py:class:`~pathvalidate.error.ErrorReason`: The cause of the error.
        """
        return self.__reason

    @property
    def description(self) -> Optional[str]:
        """Optional[str]: Error description."""
        return self.__description

    @property
    def reserved_name(self) -> str:
        """str: Reserved name."""
        return self.__reserved_name

    @property
    def reusable_name(self) -> Optional[bool]:
        """Optional[bool]: Whether the name is reusable or not."""
        return self.__reusable_name

    @property
    def fs_encoding(self) -> Optional[str]:
        """Optional[str]: File system encoding."""
        return self.__fs_encoding

    @property
    def byte_count(self) -> Optional[int]:
        """Optional[int]: Byte count of the path."""
        return self.__byte_count

    def __init__(self, *args, **kwargs) -> None:  # type: ignore
        if ErrorAttrKey.REASON not in kwargs:
            raise ValueError(f"{ErrorAttrKey.REASON} must be specified")

        self.__reason: ErrorReason = kwargs.pop(ErrorAttrKey.REASON)
        self.__byte_count: Optional[int] = kwargs.pop(ErrorAttrKey.BYTE_COUNT, None)
        self.__platform: Optional[Platform] = kwargs.pop(ErrorAttrKey.PLATFORM, None)
        self.__description: Optional[str] = kwargs.pop(ErrorAttrKey.DESCRIPTION, None)
        self.__reserved_name: str = kwargs.pop(ErrorAttrKey.RESERVED_NAME, "")
        self.__reusable_name: Optional[bool] = kwargs.pop(ErrorAttrKey.REUSABLE_NAME, None)
        self.__fs_encoding: Optional[str] = kwargs.pop(ErrorAttrKey.FS_ENCODING, None)

        try:
            super().__init__(*args[0], **kwargs)
        except IndexError:
            super().__init__(*args, **kwargs)

    def as_slog(self) -> Dict[str, str]:
        """Return a dictionary representation of the error.

        Returns:
            Dict[str, str]: A dictionary representation of the error.
        """

        slog: Dict[str, str] = {
            "code": self.reason.code,
            ErrorAttrKey.DESCRIPTION: self.reason.description,
        }
        if self.platform:
            slog[ErrorAttrKey.PLATFORM] = self.platform.value
        if self.description:
            slog[ErrorAttrKey.DESCRIPTION] = self.description
        if self.__reusable_name is not None:
            slog[ErrorAttrKey.REUSABLE_NAME] = str(self.__reusable_name)
        if self.__fs_encoding:
            slog[ErrorAttrKey.FS_ENCODING] = self.__fs_encoding
        if self.__byte_count:
            slog[ErrorAttrKey.BYTE_COUNT] = str(self.__byte_count)

        return slog

    def __str__(self) -> str:
        item_list = []
        header = str(self.reason)

        if Exception.__str__(self):
            item_list.append(Exception.__str__(self))

        if self.platform:
            item_list.append(f"{ErrorAttrKey.PLATFORM}={self.platform.value}")
        if self.description:
            item_list.append(f"{ErrorAttrKey.DESCRIPTION}={self.description}")
        if self.__reusable_name is not None:
            item_list.append(f"{ErrorAttrKey.REUSABLE_NAME}={self.reusable_name}")
        if self.__fs_encoding:
            item_list.append(f"{ErrorAttrKey.FS_ENCODING}={self.__fs_encoding}")
        if self.__byte_count is not None:
            item_list.append(f"{ErrorAttrKey.BYTE_COUNT}={self.__byte_count:,d}")

        if item_list:
            header += ": "

        return header + ", ".join(item_list).strip()

    def __repr__(self) -> str:
        return self.__str__()


class NullNameError(ValidationError):
    """[Deprecated]
    Exception raised when a name is empty.
    """

    def __init__(self, *args, **kwargs) -> None:  # type: ignore
        kwargs[ErrorAttrKey.REASON] = ErrorReason.NULL_NAME

        super().__init__(args, **kwargs)


class InvalidCharError(ValidationError):
    """
    Exception raised when includes invalid character(s) within a string.
    """

    def __init__(self, *args, **kwargs) -> None:  # type: ignore[no-untyped-def]
        kwargs[ErrorAttrKey.REASON] = ErrorReason.INVALID_CHARACTER

        super().__init__(args, **kwargs)


class ReservedNameError(ValidationError):
    """
    Exception raised when a string matched a reserved name.
    """

    def __init__(self, *args, **kwargs) -> None:  # type: ignore[no-untyped-def]
        kwargs[ErrorAttrKey.REASON] = ErrorReason.RESERVED_NAME

        super().__init__(args, **kwargs)


class ValidReservedNameError(ReservedNameError):
    """[Deprecated]
    Exception raised when a string matched a reserved name.
    However, it can be used as a name.
    """

    def __init__(self, *args, **kwargs) -> None:  # type: ignore[no-untyped-def]
        kwargs[ErrorAttrKey.REUSABLE_NAME] = True

        super().__init__(args, **kwargs)


class InvalidReservedNameError(ReservedNameError):
    """[Deprecated]
    Exception raised when a string matched a reserved name.
    Moreover, the reserved name is invalid as a name.
    """

    def __init__(self, *args, **kwargs) -> None:  # type: ignore[no-untyped-def]
        kwargs[ErrorAttrKey.REUSABLE_NAME] = False

        super().__init__(args, **kwargs)
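
Every failure carries a structured ErrorReason plus optional context (platform, reserved name, byte count), and as_slog() exposes that context for structured logging. A minimal sketch (field values illustrative):

    from pathvalidate import ErrorReason, validate_filename
    from pathvalidate.error import ValidationError

    try:
        validate_filename("CON", platform="windows")  # reserved device name on Windows
    except ValidationError as e:
        print(e.reason is ErrorReason.RESERVED_NAME)  # expected: True
        print(e.reserved_name)                        # expected: CON
        print(e.as_slog())                            # e.g. {"code": "PV1002", "description": ..., ...}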
env-llmeval/lib/python3.10/site-packages/pathvalidate/handler.py ADDED
@@ -0,0 +1,139 @@
"""
.. codeauthor:: Tsuyoshi Hombashi <[email protected]>
"""


import warnings
from datetime import datetime
from typing import Callable

from .error import ValidationError


ValidationErrorHandler = Callable[[ValidationError], str]


def return_null_string(e: ValidationError) -> str:
    """Null value handler that always returns an empty string.

    Args:
        e (ValidationError): A validation error.

    Returns:
        str: An empty string.
    """

    warnings.warn(
        "'return_null_string' is deprecated. Use 'NullValueHandler.return_null_string' instead.",
        DeprecationWarning,
    )

    return ""


def return_timestamp(e: ValidationError) -> str:
    """Null value handler that returns a timestamp of when the function was called.

    Args:
        e (ValidationError): A validation error.

    Returns:
        str: A timestamp.
    """

    warnings.warn(
        "'return_timestamp' is deprecated. Use 'NullValueHandler.reserved_name_handler' instead.",
        DeprecationWarning,
    )

    return str(datetime.now().timestamp())


def raise_error(e: ValidationError) -> str:
    """Null value handler that always raises an exception.

    Args:
        e (ValidationError): A validation error.

    Raises:
        ValidationError: Always raised.
    """

    raise e


class NullValueHandler:
    @classmethod
    def return_null_string(cls, e: ValidationError) -> str:
        """Null value handler that always returns an empty string.

        Args:
            e (ValidationError): A validation error.

        Returns:
            str: An empty string.
        """

        return ""

    @classmethod
    def return_timestamp(cls, e: ValidationError) -> str:
        """Null value handler that returns a timestamp of when the function was called.

        Args:
            e (ValidationError): A validation error.

        Returns:
            str: A timestamp.
        """

        return str(datetime.now().timestamp())


class ReservedNameHandler:
    @classmethod
    def add_leading_underscore(cls, e: ValidationError) -> str:
        """Reserved name handler that adds a leading underscore (``"_"``) to the name
        except for ``"."`` and ``".."``.

        Args:
            e (ValidationError): A reserved name error.

        Returns:
            str: The converted name.
        """

        if e.reserved_name in (".", "..") or e.reusable_name:
            return e.reserved_name

        return f"_{e.reserved_name}"

    @classmethod
    def add_trailing_underscore(cls, e: ValidationError) -> str:
        """Reserved name handler that adds a trailing underscore (``"_"``) to the name
        except for ``"."`` and ``".."``.

        Args:
            e (ValidationError): A reserved name error.

        Returns:
            str: The converted name.
        """

        if e.reserved_name in (".", "..") or e.reusable_name:
            return e.reserved_name

        return f"{e.reserved_name}_"

    @classmethod
    def as_is(cls, e: ValidationError) -> str:
        """Reserved name handler that returns the name as is.

        Args:
            e (ValidationError): A reserved name error.

        Returns:
            str: The name as is.
        """

        return e.reserved_name
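
Handlers are plain callables from ValidationError to str, so sanitizers can swap policies without branching: null_value_handler decides what an empty result becomes, and reserved_name_handler decides how reserved names are rewritten. A sketch of passing alternatives to sanitize_filename (outputs illustrative):

    from pathvalidate import sanitize_filename
    from pathvalidate.handler import NullValueHandler, ReservedNameHandler

    # Defaults: reserved names get a trailing underscore.
    print(sanitize_filename("CON", platform="windows"))  # expected: "CON_"

    # A leading underscore instead:
    print(sanitize_filename("CON", platform="windows",
                            reserved_name_handler=ReservedNameHandler.add_leading_underscore))  # "_CON"

    # When sanitizing leaves nothing, substitute a timestamp instead of "":
    print(sanitize_filename(':|*', platform="windows",
                            null_value_handler=NullValueHandler.return_timestamp))  # e.g. "1700000000.123456"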
env-llmeval/lib/python3.10/site-packages/pathvalidate/py.typed ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/America/Argentina/Buenos_Aires ADDED
Binary file (708 Bytes)
env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/America/Argentina/ComodRivadavia ADDED
Binary file (708 Bytes)
env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/America/Argentina/Mendoza ADDED
Binary file (708 Bytes)
env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/America/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (188 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Backtrace.h ADDED
@@ -0,0 +1,2 @@
#include <c10/util/Backtrace.h>
#include <c10/util/Type.h>
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/IListRef.h ADDED
@@ -0,0 +1,631 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ #include <ATen/core/ivalue_to.h>
4
+ #include <c10/util/ArrayRef.h>
5
+ #include <c10/util/Exception.h>
6
+
7
+ #include <functional>
8
+ #include <initializer_list>
9
+ #include <iterator>
10
+ #include <type_traits>
11
+
12
+ /*
13
+ * [Note: IListRef]
14
+ * Wrapper around different API containers (e.g. boxed and unboxed).
15
+ *
16
+ * What is it?
17
+ * ===========
18
+ * It is a tagged union of both boxed and unboxed API containers.
19
+ * Working implementations:
20
+ *
21
+ * - `IListRef<at::Tensor>`
22
+ * - `IListRef<at::OptionalTensorRef>`
23
+ *
24
+ * Note that `IListRef` is a view type. Meaning that it won't own the
25
+ * tensors it holds. It's intended to be used only as argument parameters.
26
+ * Specifically, where these 2 worlds overlap.
27
+ *
28
+ * What is this for?
29
+ * =================
30
+ * Historically, PyTorch has maintained 2 different APIs: the unboxed
31
+ * (called from C++ API and Python eager mode) and boxed APIs (called
32
+ * from the TorchScript JIT, mobile interpreter, and boxed fallbacks).
33
+ *
34
+ * Calling unboxed kernels from the boxed "world" and vice-versa may
35
+ * result in non-negligible overhead. Lists are one of those types:
36
+ *
37
+ * - Boxed world: `c10::List`
38
+ * - Unboxed world: `c10::ArrayRef`
39
+ *
40
+ * In this context, `c10::IListRef` solves this problem by wrapping those
41
+ * 2 container types, so that we don't need to convert from one to
42
+ * the other.
43
+ *
44
+ * (see https://github.com/pytorch/pytorch/issues/66328)
45
+ *
46
+ * What does it do?
47
+ * ================
48
+ * This container wraps around the different tagged containers
49
+ * (currently, only boxed and unboxed), without incurring in extra
50
+ * overhead for converting from one to another. It does so while
51
+ * exposing usual container methods, which dispatch to corresponding
52
+ * implementations.
53
+ *
54
+ * While it works with different container types, it introduces
55
+ * overhead for repeatedly calling member functions (since those will
56
+ * get dispatched, again). Therefore, you should only use it to iterate
57
+ * through the list up to one time. If you need to do more complex things,
58
+ * call `materialize()` first.
59
+ *
60
+ * Adding support for a new Tag
61
+ * ============================
62
+ * Suppose we want to add a new tag: `Chest`. Here are the steps
63
+ * we would have to go through:
64
+ *
65
+ * 1. Add a line for it in the macro `TORCH_ILISTREF_FORALL_TAGS`.
66
+ *
67
+ * #define TORCH_ILISTREF_FORALL_TAGS(_, ...) \
68
+ * ...
69
+ * _(Chest, ##__VA_ARGS__)
70
+ *
71
+ * 2. Add type aliases, union members, and constructors.
72
+ *
73
+ * template <typename T>
74
+ * class IListRef {
75
+ * ...
76
+ * using chest_type =
77
+ * typename detail::IListRefTagImpl<T, IListRefTag::Chest>::list_type;
78
+ * ...
79
+ * IListRef(...) : tag_(IListRefTag::Chest) {
80
+ * ...
81
+ * }
82
+ * ...
83
+ * union Payload {
84
+ * ...
85
+ * chest_type chest;
86
+ * ...
87
+ * };
88
+ * ...
89
+ * };
90
+ *
91
+ * 3. Add a default implementation for it (in 'IListRef_inl.h'). It's
92
+ * preferable to make the default implementation work for `T = Tensor`
93
+ * (both `Unboxed` and `Boxed` do it).
94
+ *
95
+ * template <typename T, typename ListElemT>
96
+ * class IListRefTagImplBase<IListRefTag::Chest, T, ListElemT> {
97
+ * public:
98
+ * using elem_type = ListElemT;
99
+ * using list_type = ChestContainer<elem_type>;
100
+ *
101
+ * static const list_type& unwrap(const IListRef<T>& ilist) { ... }
102
+ *
103
+ * static typename list_type::const_iterator& unwrap(
104
+ * IListRefIterator<T>& it) { ... }
105
+ *
106
+ * static const typename list_type::const_iterator& unwrap(
107
+ * const IListRefIterator<T>& it) { ... }
108
+ *
109
+ * static IListRefConstRef<T> iterator_get(
110
+ * const typename list_type::const_iterator& it) { ... }
111
+ * }
112
+ *
113
+ * 4. Add an specialization for each of the already supported types.
114
+ * Finally, for consistency, add them to the tracking list.
115
+ * (see [Note: IListRefTagImpl Specializations])
116
+ *
117
+ * template <>
118
+ * class IListRefTagImpl<IListRefTag::Chest, at::Tensor>
119
+ * : public IListRefTagImplBase<IListRefTag::Chest, at::Tensor> {};
120
+ *
121
+ * Adding support for a new Type
122
+ * =============================
123
+ * Suppose we want to add support for a new type: `Matrix`.
124
+ * Here are the steps we would have to go through:
125
+ *
126
+ * 1. Add an specialization for each of the existing tags.
127
+ * For consistency, add them to the tracking list.
128
+ * (see [Note: IListRefTagImpl Specializations])
129
+ *
130
+ * template <>
131
+ * class IListRefTagImpl<IListRefTag::Unboxed, Matrix>
132
+ * : public IListRefTagImplBase<IListRefTag::Unboxed, Matrix> {};
133
+ *
134
+ * template <>
135
+ * class IListRefTagImpl<Matrix, IListRefTag::Boxed>
136
+ * : public IListRefTagImplBase<IListRefTag::Boxed, Matrix> {};
137
+ *
138
+ * Common Problems
139
+ * ===============
140
+ * 1. One of `IListRef(Iterator)` methods are failing to compile.
141
+ *
142
+ * That may be happening because the container type you added
143
+ * is not compatible with the code written for that method. If
144
+ * that's true, then you might have to transform that code into
145
+ * a static method call (see `List::operator[]` method).
146
+ *
147
+ * 2. Can't make `IListRefIterator<T>::operator*` return a const-reference.
148
+ *
149
+ * First, keep in mind that we assume that boxed containers will
150
+ * have to deal with `IValue` (e.g. `c10::List`). In this context,
151
+ * what may be happening is that `IValue` doesn't store internally
152
+ * your type `T`. Instead, it constructs a type new `T` everytime
153
+ * you try to get `T` for it (see `IListRef<at::OptinalTensorRef>`).
154
+ */
155
+
156
+ namespace c10 {
157
+ template <typename T>
158
+ class IListRef;
159
+
160
+ /*
161
+ * Applies arbitrary macros to each `IListRefTag`.
162
+ */
163
+ #define TORCH_ILISTREF_FORALL_TAGS(_, ...) \
164
+ _(Unboxed, ##__VA_ARGS__) \
165
+ _(Boxed, ##__VA_ARGS__) \
166
+ _(Materialized, ##__VA_ARGS__)
167
+
168
+ /*
169
+ * Defines a "switch-case" for `TAG`. Inside, it executes `BODY`,
170
+ * while bringing to scope:
171
+ *
172
+ * - `ImplT`: the implementation class for `TAG`
173
+ * - `this_`: the result of unwrapping `this`
174
+ */
175
+ #define TORCH_ILISTREF_UNWRAP_CASE(TAG, BODY) \
176
+ case c10::IListRefTag::TAG: { \
177
+ using ImplT = c10::detail::IListRefTagImpl<IListRefTag::TAG, T>; \
178
+ auto& this_ = ImplT::unwrap(*this); \
179
+ BODY \
180
+ } break;
181
+
182
+ /*
183
+ * Dispatches the unwrap call, depending on `TAG`, followed by
184
+ * the execution of `BODY`. It aborts if `TAG` is not a `IListRefTag`.
185
+ *
186
+ * This macro is useful because it allows us to handle different
187
+ * types (that correspond to different tags) to be implemented
188
+ * only once. We can do it even when the implementation of the
189
+ * different tags aren't syntatically the same, by dispatching
190
+ * it to a function (e.g. `ImplT::<dispatch-function>(this_)`).
191
+ */
192
+ #define TORCH_ILISTREF_UNWRAP(TAG, BODY) \
193
+ switch (TAG) { \
194
+ TORCH_ILISTREF_FORALL_TAGS(TORCH_ILISTREF_UNWRAP_CASE, BODY) \
195
+ break; \
196
+ default: \
197
+ TORCH_INTERNAL_ASSERT(false, "invalid IListRef tag."); \
198
+ }
199
+
200
+ enum class IListRefTag {
201
+ #define DEFINE_TAG(tag, ...) tag,
202
+ TORCH_ILISTREF_FORALL_TAGS(DEFINE_TAG)
203
+ #undef DEFINE_TAG
204
+ None
205
+ };
206
+
207
+ namespace detail {
208
+ /*
209
+ * Type alias that specifies whether we return a reference or a copy of `T`.
210
+ *
211
+ * What is this for?
212
+ * =================
213
+ * Since values in the boxed world are represented by an `IValue`, we also
214
+ * depend on whether it can be converted to a const-reference (`Tensor`) or
215
+ * has to create a new copy of `T` (`OptionalTensorRef`).
216
+ */
217
+ template <typename T>
218
+ using IListRefConstRef = typename ivalue_to_const_ref_overload_return<T>::type;
219
+
220
+ /*
221
+ * Interface that implements key functions for each `IListRefTag` type.
222
+ *
223
+ * What is this for?
224
+ * =================
225
+ * Given an `IListRef(Iterator)<T>`, some methods have to be implemented
226
+ * differently for each `TAG`. Therefore, the methods inside this class
227
+ * are used as dispatch targets for the different `IListRefTag` values.
228
+ *
229
+ * You should create an specialization of this class for each possible
230
+ * combination of `IListRefTag` type (except `None`) and element types
231
+ * (e.g. `Tensor`).
232
+ *
233
+ * What does it do?
234
+ * ================
235
+ * 1. defines static methods to be used as dispatch targets by both
236
+ * `IListRef<T>` and `IListRefIterator<T>` (see the implementation of
237
+ * `IListRefTagImplBase`).
238
+ *
239
+ * 2. defines the `elem_type` and `list_type` aliases that will be
240
+ * used in the definition of `IListRef<T>`. In general, we should do
241
+ * so by inheriting from `IListRefTagImplBase<TAG, T, ListElemT>`.
242
+ *
243
+ * [Note: IListRefTagImpl Specialization]
244
+ * ======================================
245
+ * For `IListRef(Iterator)<at::Tensor>`:
246
+ * - <IListRefTag::Unboxed, at::Tensor>
247
+ * - <IListRefTag::Boxed, at::Tensor>
248
+ * - <IListRefTag::Materialized, at::Tensor>
249
+ *
250
+ * For `IListRef(Iterator)<at::OptionalTensorRef>`:
251
+ * - <IListRefTag::Unboxed, at::OptionalTensorRef>
252
+ * - <IListRefTag::Boxed, at::OptionalTensorRef>
253
+ * - <IListRefTag::Materialized, at::OptionalTensorRef>
254
+ */
255
+ template <IListRefTag TAG, typename T>
256
+ class IListRefTagImpl {};
257
+
258
+ /*
259
+ * Base implementation of `IListRefTagImpl<TAG, T>` methods.
260
+ *
261
+ * What is this for?
262
+ * =================
263
+ * This should make adding specializations for new types easier. For
264
+ * example, one should be able to add a new type just by making its
265
+ * `IListRefTagImpl` specialization inherit from `IListRefTagImplBase`.
266
+ *
267
+ * You should create a partial specialization for this class only if
268
+ * you introduce a new `IListRefTag`. The idea being that there is one
269
+ * default implementation for each possible value of `IListRefTag`.
270
+ *
271
+ * What does it do?
272
+ * ================
273
+ * 1. defines `elem_type` as an alias to `ListElemT`.
274
+ *
275
+ * 1. defines `list_type` as an alias to the default container type
276
+ * that will hold a collection of `elem_type`. The idea being that
277
+ * all types tagged as `TAG` will have `list_type` as its container,
278
+ * with different `elem_type`.
279
+ *
280
+ * 3. defines the default implementation for each of the methods that
281
+ * are supposed to be defined on `IListRefTagImpl` specializations.
282
+ *
283
+ * 4. inheriting from `IListRefTagImplBase<TAG, T, ListElemT>` also means
284
+ * that the payload of the type `IListRef<T>` will be of type `list_type`
285
+ * when it is tagged as `TAG`.
286
+ */
287
+ template <IListRefTag TAG, typename T, typename ListElemT = T>
288
+ class IListRefTagImplBase {};
289
+
290
+ /*
291
+ * Materialized container for `IListRef<T>`.
292
+ *
293
+ * What is this for?
294
+ * =================
295
+ * Container that groups `T` references together. This exchanges the
296
+ * overhead of every method call from `IListRef<T>` for a dynamic allocation.
297
+ *
298
+ * You should use this container instead of `IListRef<T>` if:
299
+ *
300
+ * - You are going to iterate the list more than once
301
+ * - You need to repeatedly access arbitrary elements (using `operator[]`)
302
+ * What does it do?
303
+
304
+ * ================
305
+ * Removes the reference (&) from the type, and wraps it into a
306
+ * `std::reference_wrapper`. If `IListRefConstRef<T>` is not a
307
+ * reference type, then it's left unchanged.
308
+ */
309
+ template <typename T>
310
+ using _MaterializedIListRefElem = typename std::conditional<
311
+ std::is_reference<T>::value,
312
+ typename std::reference_wrapper<typename std::remove_reference<T>::type>,
313
+ T>::type;
314
+
315
+ template <typename T>
316
+ using MaterializedIListRefElem = _MaterializedIListRefElem<IListRefConstRef<T>>;
317
+
318
+ template <typename T>
319
+ using MaterializedIListRef = std::vector<MaterializedIListRefElem<T>>;
320
+
321
+ } // namespace detail
322
+
323
+ /*
324
+ * Iterator for `IListRef<T>`.
325
+ *
326
+ * What is it?
327
+ * ===========
328
+ * Currently, a `std::bidirectional_iterator` that wraps the iterator
329
+ * types defined for each of the `IListRefTag`.
330
+ *
331
+ * One should be able to use it, as if it were the unwrapped
332
+ * iterators themselves.
333
+
334
+ * What does it do?
335
+ * ================
336
+ * Similarly to `IListRef<T>`, this is a wrapper class. Specifically, it
337
+ * wraps each container's `const_iterator` type alias. So, for example,
338
+ * given that the container for `IListRefTag::Boxed` is `c10::List`, this
339
+ * iterator will wrap a `c10::List::const_iterator`.
340
+ *
341
+ * [Note: MSVC Iterator Debug]
342
+ * ===========================
343
+ * MSVC `vector<T>::iterator` implementation (used in the boxed variant)
344
+ * makes it so this union's destructor, copy-constructor (assignment), and
345
+ * move-constructor (assignment) are implicitly deleted.
346
+ *
347
+ * Therefore, we need to explicitly define them as needed. Follows a list
348
+ * of places where these are needed and their reason:
349
+ *
350
+ * - `Payload` destructor:
351
+ * it is deleted only if the macro `_ITERATOR_DEBUG_LEVEL` is set to 2.
352
+ *
353
+ * - `IListRefIterator` destructor:
354
+ * same as above. However, we need to explicitly call the variant
355
+ * destructor explicitly.
356
+ *
357
+ * - `IListRefIterator` copy-constructor:
358
+ * it is deleted only if the macro `_ITERATOR_DEBUG_LEVEL` is different
359
+ * than 0.
360
+ */
361
+ template <typename T>
362
+ class IListRefIterator {
363
+ private:
364
+ #define DEFINE_FRIEND_CLASS(TAG, ...) \
365
+ friend class detail::IListRefTagImpl<IListRefTag::TAG, T>; \
366
+ friend class detail::IListRefTagImplBase< \
367
+ IListRefTag::TAG, \
368
+ T, \
369
+ typename detail::IListRefTagImpl<IListRefTag::TAG, T>::elem_type>;
370
+ TORCH_ILISTREF_FORALL_TAGS(DEFINE_FRIEND_CLASS)
371
+ #undef DEFINE_FRIEND_CLASS
372
+
373
+ public:
374
+ // C++17 friendly std::iterator implementation
375
+ using iterator_category = std::bidirectional_iterator_tag;
376
+ using value_type = T;
377
+ using difference_type = std::ptrdiff_t;
378
+ using pointer = T*;
379
+ using reference = T&;
380
+
381
+ using unboxed_iterator_type = typename detail::
382
+ IListRefTagImpl<IListRefTag::Unboxed, T>::list_type::const_iterator;
383
+ using boxed_iterator_type = typename detail::
384
+ IListRefTagImpl<IListRefTag::Boxed, T>::list_type::const_iterator;
385
+ using materialized_iterator_type =
386
+ typename detail::MaterializedIListRef<T>::const_iterator;
387
+
388
+ IListRefIterator() : tag_(IListRefTag::None) {}
389
+
390
+ #if defined(_MSC_VER) && _ITERATOR_DEBUG_LEVEL != 0
391
+ // See [Note: MSVC Iterator Debug]
392
+ IListRefIterator(const IListRefIterator& iterator)
393
+ : tag_(iterator.tag_) {
394
+ switch (tag_) {
395
+ case IListRefTag::Boxed:
396
+ payload_.boxed_iterator = iterator.payload_.boxed_iterator;
397
+ break;
398
+ case IListRefTag::Unboxed:
399
+ payload_.unboxed_iterator = iterator.payload_.unboxed_iterator;
400
+ break;
401
+ case IListRefTag::Materialized:
402
+ payload_.materialized_iterator = iterator.payload_.materialized_iterator;
403
+ break;
404
+ default:
405
+ TORCH_INTERNAL_ASSERT(false, "invalid IListRef tag.");
406
+ }
407
+ }
408
+ #endif
409
+
410
+ #if defined(_MSC_VER) && _ITERATOR_DEBUG_LEVEL == 2
411
+ // See [Note: MSVC Iterator Debug]
412
+ ~IListRefIterator() noexcept(false) {
413
+ switch (tag_) {
414
+ case IListRefTag::Boxed:
415
+ payload_.boxed_iterator.~boxed_iterator_type();
416
+ break;
417
+ case IListRefTag::Unboxed:
418
+ payload_.unboxed_iterator.~unboxed_iterator_type();
419
+ break;
420
+ case IListRefTag::Materialized:
421
+ payload_.materialized_iterator.~materialized_iterator_type();
422
+ break;
423
+ default:
424
+ TORCH_INTERNAL_ASSERT(false, "invalid IListRef tag.");
425
+ }
426
+ }
427
+ #endif
428
+
429
+ IListRefIterator(boxed_iterator_type boxed) : tag_(IListRefTag::Boxed) {
430
+ payload_.boxed_iterator = boxed;
431
+ }
432
+
433
+ IListRefIterator(unboxed_iterator_type unboxed) : tag_(IListRefTag::Unboxed) {
434
+ payload_.unboxed_iterator = unboxed;
435
+ }
436
+
437
+ IListRefIterator(materialized_iterator_type materialized) : tag_(IListRefTag::Materialized) {
438
+ payload_.materialized_iterator = materialized;
439
+ }
440
+
441
+ detail::IListRefConstRef<T> operator*() const {
442
+ TORCH_ILISTREF_UNWRAP(tag_, { return ImplT::iterator_get(this_); });
443
+ }
444
+
445
+ IListRefIterator& operator++() {
446
+ TORCH_ILISTREF_UNWRAP(tag_, { ++this_; });
447
+ return *this;
448
+ }
449
+
450
+ IListRefIterator operator++(int) {
451
+ auto old = *this;
452
+ TORCH_ILISTREF_UNWRAP(tag_, { ++this_; });
453
+ return old;
454
+ }
455
+
456
+ IListRefIterator& operator--() {
457
+ TORCH_ILISTREF_UNWRAP(tag_, { --this_; });
458
+ return *this;
459
+ }
460
+
461
+ IListRefIterator operator--(int) {
462
+ auto old = *this;
463
+ TORCH_ILISTREF_UNWRAP(tag_, { --this_; });
464
+     return old;
+   }
+
+   bool operator==(const IListRefIterator& rhs) const {
+     if (tag_ != rhs.tag_) {
+       return false;
+     }
+     TORCH_ILISTREF_UNWRAP(tag_, {
+       auto& rhs_it = ImplT::unwrap(rhs);
+       return this_ == rhs_it;
+     });
+   }
+
+   bool operator!=(const IListRefIterator& rhs) const {
+     return !(*this == rhs);
+   }
+
+  private:
+   union Payload {
+     boxed_iterator_type boxed_iterator;
+     unboxed_iterator_type unboxed_iterator;
+     materialized_iterator_type materialized_iterator;
+     void* _init_ptr;
+     Payload() : _init_ptr(nullptr) {}
+ #if defined(_MSC_VER)
+     // See [Note: MSVC Iterator Debug]
+     ~Payload() {}
+ #endif
+   };
+
+   Payload payload_;
+   IListRefTag tag_;
+ };
+
+ /*
+  * See [Note: IListRef]
+  */
+ template <typename T>
+ class IListRef {
+  private:
+ #define DEFINE_FRIEND_CLASS(TAG, ...)                        \
+   friend class detail::IListRefTagImpl<IListRefTag::TAG, T>; \
+   friend class detail::IListRefTagImplBase<                  \
+       IListRefTag::TAG,                                      \
+       T,                                                     \
+       typename detail::IListRefTagImpl<IListRefTag::TAG, T>::elem_type>;
+   TORCH_ILISTREF_FORALL_TAGS(DEFINE_FRIEND_CLASS)
+ #undef DEFINE_FRIEND_CLASS
+
+  public:
+   using unboxed_type =
+       typename detail::IListRefTagImpl<IListRefTag::Unboxed, T>::list_type;
+   using boxed_type =
+       typename detail::IListRefTagImpl<IListRefTag::Boxed, T>::list_type;
+   using materialized_type =
+       typename detail::MaterializedIListRef<T>;
+
+   using iterator = IListRefIterator<T>;
+   using const_iterator = IListRefIterator<T>;
+   using reverse_iterator = std::reverse_iterator<iterator>;
+   using value_type = typename iterator::value_type;
+
+   IListRef() : tag_(IListRefTag::None) {}
+
+   IListRef(const boxed_type& boxed) : tag_(IListRefTag::Boxed) {
+     payload_.boxed = &boxed;
+   }
+
+   IListRef(const unboxed_type& unboxed) : tag_(IListRefTag::Unboxed) {
+     payload_.unboxed = unboxed;
+   }
+
+   IListRef(const std::initializer_list<T>& list) : tag_(IListRefTag::Unboxed) {
+     payload_.unboxed = at::ArrayRef<T>(list);
+   }
+
+   template <
+       typename... UnboxedConstructorArgs,
+       typename = std::enable_if_t<
+           std::is_constructible<unboxed_type, UnboxedConstructorArgs...>::value>>
+   IListRef(UnboxedConstructorArgs&&... args) : tag_(IListRefTag::Unboxed) {
+     payload_.unboxed = unboxed_type(std::forward<UnboxedConstructorArgs>(args)...);
+   }
+
+   IListRef(const materialized_type& materialized) : tag_(IListRefTag::Materialized) {
+     payload_.materialized = &materialized;
+   }
+
+   size_t size() const {
+     TORCH_ILISTREF_UNWRAP(tag_, { return this_.size(); });
+   }
+
+   bool empty() const {
+     return size() == 0;
+   }
+
+   iterator begin() const {
+     TORCH_ILISTREF_UNWRAP(tag_, { return this_.begin(); });
+   }
+
+   iterator end() const {
+     TORCH_ILISTREF_UNWRAP(tag_, { return this_.end(); });
+   }
+
+   detail::IListRefConstRef<T> front() const {
+     TORCH_ILISTREF_UNWRAP(tag_, { return ImplT::front(this_); });
+   }
+
+   /*
+    * Materializes the `IListRef` into a `std::vector`.
+    *
+    * This should be used when one wishes to either:
+    *
+    * - iterate over the list more than once: each `IListRefIterator`
+    *   member function call has to go through a switch, introducing
+    *   non-negligible overhead
+    *
+    * - randomly access an arbitrary element using `operator[]`:
+    *   same reason as above
+    */
+   detail::MaterializedIListRef<T> materialize() const {
+     if (isMaterialized()) {
+       return toMaterialized();
+     }
+
+     detail::MaterializedIListRef<T> materialized;
+     materialized.reserve(size());
+     for (const auto& t : *this) {
+       materialized.emplace_back(t);
+     }
+     return materialized;
+   }
+
+ #define DEFINE_CHECK(TAG, ...)       \
+   bool is##TAG() const {             \
+     return tag_ == IListRefTag::TAG; \
+   }
+   TORCH_ILISTREF_FORALL_TAGS(DEFINE_CHECK);
+ #undef DEFINE_CHECK
+
+   bool isNone() const {
+     return tag_ == IListRefTag::None;
+   }
+
+ #define DEFINE_CASTING(TAG, ...)                                          \
+   const typename detail::IListRefTagImpl<IListRefTag::TAG, T>::list_type& \
+       to##TAG() const {                                                   \
+     TORCH_INTERNAL_ASSERT(is##TAG());                                     \
+     return detail::IListRefTagImpl<IListRefTag::TAG, T>::unwrap(*this);   \
+   }
+   TORCH_ILISTREF_FORALL_TAGS(DEFINE_CASTING);
+ #undef DEFINE_CASTING
+
+  private:
+   union Payload {
+     const boxed_type* boxed;
+     unboxed_type unboxed;
+     const materialized_type* materialized;
+     Payload() : boxed(nullptr) {}
+   };
+
+   Payload payload_;
+   IListRefTag tag_;
+ };
+
+ } // namespace c10
+
+ #include <ATen/core/IListRef_inl.h>
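A minimal usage sketch of the `materialize()` escape hatch documented above. It assumes `at::ITensorListRef` (the `Tensor` instantiation of `IListRef` declared alongside this header); `sum_twice` is a hypothetical helper, not part of the file:

    #include <ATen/ATen.h>
    #include <ATen/core/IListRef.h>

    // Hypothetical helper: iterates the list twice, so we materialize once
    // up front instead of paying the per-call tag switch on every access.
    at::Tensor sum_twice(at::ITensorListRef tensors) {
      auto materialized = tensors.materialize(); // vector of tensor refs
      at::Tensor acc = at::zeros_like(materialized[0].get());
      for (const at::Tensor& t : materialized) acc += t; // first pass
      for (const at::Tensor& t : materialized) acc += t; // second pass
      return acc;
    }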
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/LegacyTypeDispatch.h ADDED
@@ -0,0 +1,111 @@
+ #pragma once
+
+ // The legacy mechanism for dispatching operators in ATen is a Type
+ // object, which is essentially a giant virtual dispatch table
+ // for every operation we support dynamically dispatching over.
+ //
+ // This has been deprecated in favor of ATenDispatch, and in the future,
+ // the c10 dispatcher.
+ // TODO: Clean up what remains here
+
+ #include <c10/core/impl/LocalDispatchKeySet.h>
+
+ namespace at {
+
+ // A RAII, thread-local (!) guard that disables dispatch to the variable
+ // handler.
+ //
+ // NOTE [ Treating Variables as non-Variables in type dispatch ]
+ //
+ // What exactly does AutoDispatchBelowAutograd do? The short answer is, it
+ // causes dispatches on ATen functions to go to the non-variable
+ // implementation, bypassing autograd handling (and also profiling and
+ // tracing).
+ //
+ // To understand why this guard exists, it's helpful to understand the history
+ // behind how Variable was implemented. Previously, Variables were implemented
+ // as a wrapper on Tensors; so the act of processing a Variable involved
+ // unwrapping the underlying Tensor, and then calling the underlying base
+ // operation on /that/ tensor.
+ //
+ // However, after the Variable/Tensor merge, there is no concept of unwrapping
+ // a tensor anymore. If you just call the operation on the same variable
+ // again inside your VariableType handler, you'll dispatch back to
+ // VariableType, which is not what we want.
+ //
+ // The solution to the above problem is to add `at::AutoDispatchBelowAutograd`,
+ // which when enabled will cause `legacyTensorType()` and `getType()` to always
+ // return non-Variable type, even if the tensor being called on is a variable.
+
+ /* Note [AutoDispatchBelowAutograd]
+  * AutoDispatchBelowAutograd is **INTERNAL ONLY**; it should only be used
+  * for kernel implementations and customized C++ kernels.
+  * If you are looking for a guard to run a workload in inference mode, please
+  * use the c10::InferenceMode RAII guard, which is the user-facing API.
+  * In the past, AutoDispatchBelowAutograd (or its old version,
+  * AutoNonVariableTypeMode) was used in user code for inference-only
+  * workloads, which risked silently producing wrong results in some edge
+  * cases. For example:
+  * ```
+  * torch::Tensor s = torch::ones({1, 2, 3}).set_requires_grad(true);
+  * torch::Tensor out = s * s;
+  * {
+  *   at::AutoDispatchBelowAutograd guard;
+  *   s.add_(1);  // Skips version bump on `s`.
+  * }
+  * // WRONG GRADIENT! s.grad() is now computed using the value of `s` after
+  * // the inplace update.
+  * out.backward(torch::ones_like(out));
+  * ```
+  * Users should use `c10::InferenceMode` here so that it properly throws an
+  * error saying "one of the variables needed for gradient computation has
+  * been modified."
+  */
+ struct TORCH_API AutoDispatchBelowAutograd {
+   AutoDispatchBelowAutograd() :
+     autograd_guard_(c10::autograd_dispatch_keyset) {
+   }
+
+   // disable all autograd dispatch keys
+   c10::impl::ExcludeDispatchKeyGuard autograd_guard_;
+ };
+
+ // TODO: AutoNonVariableTypeMode should be removed in release 1.10.
+ struct TORCH_API AutoNonVariableTypeMode {
+   AutoNonVariableTypeMode(bool enabled = true) :
+     autograd_guard_(c10::autograd_dispatch_keyset) {
+     TORCH_WARN_ONCE("AutoNonVariableTypeMode is deprecated and will be removed in 1.10 release. "
+         "For kernel implementations please use AutoDispatchBelowADInplaceOrView instead, "
+         "If you are looking for a user facing API to enable running your inference-only "
+         "workload, please use c10::InferenceMode. Using AutoDispatchBelowADInplaceOrView in user code "
+         "is under risk of producing silent wrong result in some edge cases. "
+         "See Note [AutoDispatchBelowAutograd] for more details.");
+     TORCH_INTERNAL_ASSERT(enabled);
+   }
+
+   // disable all autograd dispatch keys
+   c10::impl::ExcludeDispatchKeyGuard autograd_guard_;
+ };
+
+ struct TORCH_API AutoDispatchSkipFunctionalize {
+   AutoDispatchSkipFunctionalize() :
+     dispatch_key_guard_(c10::DispatchKeySet(c10::DispatchKey::Functionalize)) {
+   }
+   c10::impl::ExcludeDispatchKeyGuard dispatch_key_guard_;
+ };
+
+ /* Note [AutoDispatchBelowADInplaceOrView]
+  * AutoDispatchBelowADInplaceOrView is equivalent to AutoNonVariableTypeMode
+  * before we split inplace & view ops out of the VariableType kernel.
+  * Note this guard is used in VariableType kernels for functional ops
+  * as well as ADInplaceOrView kernels for inplace/view ops to enforce the
+  * invariant:
+  *   Once you are in a VariableType/ADInplaceOrView kernel for an op,
+  *   you never go back to a kernel on the same dispatch key until
+  *   you finish the current op.
+  */
+ struct TORCH_API AutoDispatchBelowADInplaceOrView {
+   AutoDispatchBelowADInplaceOrView() :
+     dispatch_key_guard_(c10::autograd_dispatch_keyset_with_ADInplaceOrView) {
+   }
+   // disable Autograd & ADInplaceOrView dispatch keys
+   c10::impl::ExcludeDispatchKeyGuard dispatch_key_guard_;
+ };
+ } // namespace at
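A hedged sketch of the user-facing alternative recommended in the note above: the same inference-only block written with `c10::InferenceMode`, which raises a clear autograd error instead of silently corrupting gradients. This is an illustration of the recommended pattern, not an excerpt from the file:

    #include <torch/torch.h>

    int main() {
      torch::Tensor s = torch::ones({1, 2, 3}).set_requires_grad(true);
      torch::Tensor out = s * s;
      {
        c10::InferenceMode guard; // user-facing replacement for the internal guards
        // Mutating `s` in here would now surface as a descriptive error at
        // backward() time ("...has been modified by an inplace operation"),
        // rather than silently producing a wrong gradient.
      }
      out.backward(torch::ones_like(out));
      return 0;
    }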
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/QuantizerBase.h ADDED
@@ -0,0 +1,83 @@
+ #pragma once
+
+ #include <c10/core/ScalarType.h>
+ #include <c10/core/QScheme.h>
+ #include <c10/util/intrusive_ptr.h>
+
+ namespace at {
+
+ class Tensor;
+ struct QTensorImpl;
+ struct Quantizer;
+ using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&;
+ using QuantizerPtr = c10::intrusive_ptr<Quantizer>;
+
+ /**
+  * Quantizer is the class for storing all the information
+  * that's necessary to perform quantize and dequantize
+  * operations.
+  *
+  * We might have different types of quantization schemes and this is
+  * the base class for all quantizers.
+  *
+  * QTensorImpl will hold a pointer to Quantizer so that we can support
+  * different quantization schemes on Tensor.
+  *
+  * For example, the most common quantization scheme, Affine Quantization,
+  * requires scale and zero_point as parameters; we'll store scale and
+  * zero_point inside the instance and use them to quantize a float Tensor or
+  * dequantize a quantized Tensor.
+  *
+  * When you add a new type of leaf Quantizer class, please also
+  * make sure to add a corresponding QScheme enum, since
+  * they should have a one-to-one mapping.
+  *
+  * Note about intrusive_ptr:
+  * A quantized Tensor holds an intrusive_ptr to its Quantizer, and multiple
+  * Tensors can share the same Quantizer. Quantizer should be immutable.
+  */
+ struct TORCH_API Quantizer : public c10::intrusive_ptr_target {
+   const ScalarType scalar_type_;
+   explicit Quantizer(ScalarType scalar_type) : scalar_type_(scalar_type) {}
+   ~Quantizer() override;
+
+   // Copied from torch/csrc/jit/ir/scope.h
+   QuantizerPtr intrusive_from_this() {
+     c10::raw::intrusive_ptr::incref(this); // we are creating a new pointer
+                                            // from a raw `this` pointer
+                                            // so we need to bump the refcount
+                                            // to account for this ownership
+     return c10::intrusive_ptr<Quantizer>::reclaim(this);
+   }
+
+   /**
+    * Each concrete Quantizer type should have a unique QScheme type.
+    */
+   virtual QScheme qscheme() const = 0;
+
+   ScalarType scalar_type() const {
+     return scalar_type_;
+   }
+
+   /**
+    * quantize a float Tensor into a quantized Tensor.
+    */
+   virtual Tensor quantize(const Tensor& t) = 0;
+
+   /**
+    * dequantize a quantized Tensor into a float Tensor.
+    */
+   virtual Tensor dequantize(const Tensor& t) = 0;
+
+   /**
+    * dequantize a quantized Tensor into a float Tensor, out= variant
+    */
+   virtual Tensor& dequantize_out(Tensor& out, const Tensor& t) = 0;
+
+   /**
+    * Compare against `other` for equality.
+    */
+   virtual bool equalTo(QuantizerPtr other) const = 0;
+ };
+
+ } // namespace at
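A minimal sketch of what a concrete leaf quantizer derived from this interface could look like. `IdentityQuantizer` is a hypothetical illustration of the virtual interface, not one of ATen's real quantizers (those live in ATen/quantized/Quantizer.h):

    #include <ATen/ATen.h>
    #include <ATen/core/QuantizerBase.h>

    // Hypothetical no-op quantizer, only to illustrate the pure virtuals.
    struct IdentityQuantizer : public at::Quantizer {
      explicit IdentityQuantizer(at::ScalarType t) : at::Quantizer(t) {}

      at::QScheme qscheme() const override {
        return at::kPerTensorAffine; // each leaf maps to exactly one QScheme
      }
      at::Tensor quantize(const at::Tensor& t) override {
        return t.clone(); // a real quantizer applies scale/zero_point here
      }
      at::Tensor dequantize(const at::Tensor& t) override {
        return t.clone();
      }
      at::Tensor& dequantize_out(at::Tensor& out, const at::Tensor& t) override {
        out.copy_(t);
        return out;
      }
      bool equalTo(at::QuantizerPtr other) const override {
        return other && other->qscheme() == qscheme() &&
            other->scalar_type() == scalar_type();
      }
    };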
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/VariableHooksInterface.h ADDED
@@ -0,0 +1,75 @@
+ #pragma once
+
+ #include <c10/macros/Export.h>
+ #include <ATen/core/Tensor.h>
+
+ // A little explanation about why this file exists at all. We have
+ // a few methods on the Tensor class which require reified access to
+ // AutogradMeta. In open source, this isn't a big deal: we just access
+ // torch/csrc/autograd/variable.h from aten/src/ATen/core/Tensor.cpp and
+ // we can put the definitions inline. This is because everything gets balled
+ // into a single dynamic library in the end.
+ //
+ // However, inside our Facebook internal version of our build system, we
+ // have a split between aten and torch/csrc. So we cannot simply just
+ // cross this boundary. "Now wait," you might say, "why don't we just
+ // merge the libraries inside Facebook?" Well, the problem is that there
+ // are some downstream applications which are at binary size limit, and
+ // incorporating all of the extra code from libtorch would push them
+ // over (admarket/adreview/service:adreviewservice, see also
+ // https://github.com/pytorch/pytorch/pull/29299). So if you want to do that,
+ // we have to fix all of the services like this.
+ //
+ // I didn't want to block eliminating Tensor-Variable on this work, so I
+ // had to introduce another dynamic dispatch to get to the variable
+ // implementations (which live in torch/csrc/autograd/variable.cpp, FYI).
+ //
+ // I also considered using our existing dynamic dispatch mechanism, the c10
+ // dispatcher, to do this. However, (1) some of the functions on Tensor
+ // have weird signatures that are not supported by autograd, and (2)
+ // see this bug https://github.com/pytorch/pytorch/issues/30102
+
+ namespace torch { namespace autograd {
+
+ struct Node;
+
+ }} // namespace torch::autograd
+
+ namespace at {
+ namespace impl {
+
+ struct TORCH_API VariableHooksInterface {
+   virtual ~VariableHooksInterface() = default;
+   virtual TensorBase tensor_data(const TensorBase&) const = 0;
+   virtual TensorBase variable_data(const TensorBase&) const = 0;
+   virtual const std::shared_ptr<torch::autograd::Node>& grad_fn(const TensorBase&) const = 0;
+   virtual unsigned _register_hook(
+       const TensorBase&,
+       std::function<TensorBase(const TensorBase&)> hook) const = 0;
+   virtual void remove_hook(const TensorBase&, unsigned pos) const = 0;
+   virtual bool is_view(const TensorBase&) const = 0;
+   virtual const TensorBase& base(const TensorBase&) const = 0;
+   virtual const std::string& name(const TensorBase&) const = 0;
+   virtual bool is_leaf(const TensorBase&) const = 0;
+   virtual int64_t output_nr(const TensorBase&) const = 0;
+   virtual void set_data(const TensorBase&, const TensorBase&) const = 0;
+   virtual TensorBase data(const TensorBase&) const = 0;
+   virtual int64_t _version(const TensorBase&) const = 0;
+   virtual void retain_grad(const TensorBase&) const = 0;
+   virtual bool retains_grad(const TensorBase&) const = 0;
+   virtual void _backward(const Tensor&, TensorList, const c10::optional<Tensor>&, c10::optional<bool>, bool) const = 0;
+   virtual void requires_grad_(const TensorBase&, bool) const = 0;
+   virtual void basic_autograd_not_implemented_fallback(const c10::OperatorHandle& op, c10::DispatchKeySet dispatch_keys, torch::jit::Stack* stack) const = 0;
+ };
+
+ TORCH_API void SetVariableHooks(VariableHooksInterface* hooks);
+ TORCH_API VariableHooksInterface* GetVariableHooks();
+ TORCH_API bool HasVariableHooks();
+
+ struct TORCH_API VariableHooksRegisterer {
+   explicit VariableHooksRegisterer(VariableHooksInterface* hooks) {
+     SetVariableHooks(hooks);
+   }
+ };
+
+ }} // namespace at::impl
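A short sketch of the dynamic-dispatch hop this header sets up: Tensor methods in ATen fetch the registered hooks object and forward to it, with the real implementations living behind the aten/torch boundary. The helper below illustrates the call pattern only; `tensor_is_leaf` is hypothetical, not part of the header:

    #include <ATen/core/VariableHooksInterface.h>

    // Illustrative only: how ATen-side code can reach autograd state
    // without linking against torch/csrc/autograd directly.
    bool tensor_is_leaf(const at::TensorBase& t) {
      TORCH_CHECK(at::impl::HasVariableHooks(), "autograd hooks not registered");
      return at::impl::GetVariableHooks()->is_leaf(t);
    }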
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/dynamic_type.h ADDED
@@ -0,0 +1,239 @@
+ #pragma once
+
+ #include <cstdint>
+ #include <memory>
+ #include <type_traits>
+
+ #include <ATen/core/jit_type_base.h>
+ #include <c10/util/Optional.h>
+
+ namespace c10 {
+
+ using DynamicTypeBits = std::uint32_t;
+ #define DYNAMIC_TYPE_BIT(x) (1u << x)
+
+ constexpr DynamicTypeBits kDynamicCovariantTypeBit = DYNAMIC_TYPE_BIT(31);
+ constexpr DynamicTypeBits kDynamicAnyTypeBit = DYNAMIC_TYPE_BIT(30);
+
+ constexpr DynamicTypeBits kDynamicNoneTypeBit = DYNAMIC_TYPE_BIT(1);
+ constexpr DynamicTypeBits kDynamicIntTypeBit = DYNAMIC_TYPE_BIT(3);
+ constexpr DynamicTypeBits kDynamicFloatTypeBit = DYNAMIC_TYPE_BIT(4);
+ constexpr DynamicTypeBits kDynamicComplexTypeBit = DYNAMIC_TYPE_BIT(5);
+ constexpr DynamicTypeBits kDynamicListTypeBit = DYNAMIC_TYPE_BIT(7);
+ constexpr DynamicTypeBits kDynamicTupleTypeBit = DYNAMIC_TYPE_BIT(8);
+ constexpr DynamicTypeBits kDynamicClassTypeBit = DYNAMIC_TYPE_BIT(10);
+
+ #define FORALL_DYNAMIC_TYPES(_)                                              \
+   _(Tensor, DYNAMIC_TYPE_BIT(0), 1)                                          \
+   _(None, kDynamicNoneTypeBit, 1)                                            \
+   _(Bool, DYNAMIC_TYPE_BIT(2), 1)                                            \
+   _(Int, kDynamicIntTypeBit, 1)                                              \
+   _(Float, kDynamicFloatTypeBit, 1)                                          \
+   _(Complex, kDynamicComplexTypeBit, 1)                                      \
+   _(Number,                                                                  \
+     (kDynamicIntTypeBit | kDynamicFloatTypeBit | kDynamicComplexTypeBit),    \
+     1)                                                                       \
+   _(String, DYNAMIC_TYPE_BIT(6), 1)                                          \
+   _(List, kDynamicListTypeBit, 0)                                            \
+   _(Tuple, (kDynamicTupleTypeBit | kDynamicCovariantTypeBit), 0)             \
+   _(Dict, DYNAMIC_TYPE_BIT(9), 0)                                            \
+   _(Class, kDynamicClassTypeBit, 0)                                          \
+   _(Optional,                                                                \
+     (DYNAMIC_TYPE_BIT(11) | kDynamicNoneTypeBit | kDynamicCovariantTypeBit), \
+     0)                                                                       \
+   _(AnyList, (kDynamicListTypeBit | kDynamicAnyTypeBit), 1)                  \
+   _(AnyTuple,                                                                \
+     (kDynamicTupleTypeBit | kDynamicCovariantTypeBit | kDynamicAnyTypeBit),  \
+     1)                                                                       \
+   _(DeviceObj, DYNAMIC_TYPE_BIT(12), 1)                                      \
+   _(StreamObj, DYNAMIC_TYPE_BIT(13), 1)                                      \
+   _(Capsule, DYNAMIC_TYPE_BIT(14), 1)                                        \
+   _(Generator, DYNAMIC_TYPE_BIT(15), 1)                                      \
+   _(Storage, DYNAMIC_TYPE_BIT(16), 1)                                        \
+   _(Var, DYNAMIC_TYPE_BIT(17), 0)                                            \
+   _(AnyClass, (kDynamicClassTypeBit | kDynamicAnyTypeBit), 1)                \
+   _(QScheme, DYNAMIC_TYPE_BIT(18), 1)                                        \
+   _(Quantizer, DYNAMIC_TYPE_BIT(19), 1)                                      \
+   _(AnyEnum, DYNAMIC_TYPE_BIT(20), 1)                                        \
+   _(RRef, DYNAMIC_TYPE_BIT(21), 0)                                           \
+   _(Future, DYNAMIC_TYPE_BIT(22), 0)                                         \
+   _(Await, DYNAMIC_TYPE_BIT(23), 0)                                          \
+   _(Any, 0xffffffff, 1)
+
+ #define FORALL_DYNAMIC_TYPES_FAKE(_)     \
+   _(ScalarType, kDynamicIntTypeBit, 1)   \
+   _(Layout, kDynamicIntTypeBit, 1)       \
+   _(SymInt, kDynamicIntTypeBit, 1)       \
+   _(MemoryFormat, kDynamicIntTypeBit, 1)
+
+ #define FORWARD_DECL_TYPE(NAME, _, __) struct NAME ## Type;
+ FORALL_DYNAMIC_TYPES(FORWARD_DECL_TYPE)
+ FORALL_DYNAMIC_TYPES_FAKE(FORWARD_DECL_TYPE)
+ #undef FORWARD_DECL_TYPE
+
+ class DynamicType;
+ using DynamicTypePtr = std::shared_ptr<DynamicType>;
+
+ /**
+  * DynamicType is designed as a low-dependency type system for TorchScript.
+  * The existing JIT types are used for both compilation and runtime, which
+  * makes sense for server contexts because we often compile and run the model
+  * in the same process. However, this doesn't hold for mobile devices, where
+  * we always compile a model ahead of time; as a result, the mobile runtime
+  * is built with dependencies it does not need, causing binary-size bloat by
+  * design. Every basic type like Int, Bool or String brings its vtable,
+  * typeinfo, constructor, destructor and even more data from its
+  * specializations for STL types into the binary, causing a long-tail bloat.
+  *
+  * The core problem is the complexity of implementing and maintaining a single
+  * type system for both analysis and execution purposes. Although they should
+  * have exactly the same semantics, in practice implementing a unified
+  * abstraction adds conceptual and representational overhead for both sides
+  * of the world.
+  *
+  * To address these issues, DynamicType implements a minimal subset of JIT
+  * types and uses a generic algorithm to test all subtyping relations. To
+  * achieve this, we assign each dynamic type a single integer tag to represent
+  * its semantics. More specifically, a dynamic type is defined as a set of
+  * "control bits" and "data bits", where control bits describe the special
+  * behavior when testing a type and data bits map to the identity of each
+  * nominal type. We use bit operations to perform all the tests.
+  *
+  * For example, a "covariant bit" is a control bit used to describe whether a
+  * type is covariant; right now the most used one is the tuple type, and in
+  * addition to the control bit, tuple type's data bit is the 8th bit from the
+  * LSB. Control bits start from the MSB and data bits start from the LSB.
+  *
+  * If two types are equal, then they are subtypes of each other; also, if the
+  * bits of one type's tag are a subset of the other's, it is automatically a
+  * subtype of the other. This simplifies the subtyping logic a lot, and over
+  * the long term it is possible to adopt this scheme on the server side as
+  * well. Special cases can be added, but they generally should not take too
+  * much code size.
+  *
+  * DynamicType may or may not inherit from c10::Type because it's not the core
+  * requirement of DynamicType to interface with existing JIT types, but we
+  * might want to inherit from c10::Type to reduce the migration cost.
+  */
+ class DynamicType : public SharedType {
+   using ClassTypePtr = std::shared_ptr<const c10::ClassType>;
+
+   /**
+    * An implementation detail to support NamedTuple.
+    */
+   struct LabeledDynamicType {
+     c10::optional<std::string> label;
+     DynamicTypePtr ty;
+     explicit LabeledDynamicType(DynamicTypePtr t) : ty(std::move(t)) {}
+
+     bool equals(const LabeledDynamicType& other) const;
+     bool isSubtypeOf(const LabeledDynamicType& other) const;
+   };
+
+  public:
+   // TODO Change Ptr to DynamicTypePtr when all migrations are done.
+   using Ptr = TypePtr;
+   using ElementType = DynamicType;
+   ~DynamicType() override;
+
+   struct Arguments {
+     Arguments() = default;
+     Arguments(c10::ArrayRef<TypePtr>);
+     Arguments(const std::vector<c10::string_view>&, c10::ArrayRef<TypePtr>);
+     std::vector<LabeledDynamicType> elems;
+   };
+
+   enum class Tag : DynamicTypeBits {
+ #define DYNAMIC_TYPE_ITEM(NAME, VAL, _) NAME = VAL,
+     FORALL_DYNAMIC_TYPES(DYNAMIC_TYPE_ITEM)
+     FORALL_DYNAMIC_TYPES_FAKE(DYNAMIC_TYPE_ITEM)
+ #undef DYNAMIC_TYPE_ITEM
+   };
+
+   bool equals(const Type& rhs) const override;
+   bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override;
+   std::string str() const override;
+   static const TypeKind Kind = TypeKind::DynamicType;
+   static TORCH_API DynamicTypePtr create(Type& ty);
+
+   explicit DynamicType(Tag, Arguments);
+   explicit DynamicType(Tag, c10::string_view, Arguments);
+
+   TypePtr containedType(size_t) const override;
+   size_t containedTypeSize() const override;
+   Tag tag() const {
+     return tag_;
+   }
+   const c10::optional<std::string>& name() const {
+     return name_;
+   }
+   const Arguments& arguments() const {
+     return arguments_;
+   }
+   TORCH_API TypeKind dynamicKind() const;
+
+   // Should be used only on the server side to restore static type information.
+ #ifndef C10_MOBILE
+   TORCH_API
+ #endif
+   TypePtr fallback() const;
+
+  private:
+   bool symmetric() const override {
+     return false;
+   }
+   friend struct Type;
+   static std::shared_ptr<const DynamicType> create(const Type& ty);
+   DynamicType(const Type& other);
+   bool equals(const DynamicType& other) const;
+
+   template <typename F>
+   bool compareArguments(const DynamicType& other, F&& f) const {
+     if (arguments_.elems.size() != other.arguments_.elems.size()) {
+       return false;
+     }
+     for (size_t i = 0; i < arguments_.elems.size(); i++) {
+       if (!f(arguments_.elems[i], other.arguments_.elems[i])) {
+         return false;
+       }
+     }
+     return true;
+   }
+
+   Tag tag_;
+   c10::optional<std::string> name_;
+   union {
+     Arguments arguments_;
+     ClassTypePtr class_;
+   };
+ };
+
+ template <typename T>
+ struct DynamicTypeTrait {
+   C10_NOINLINE static auto tagValue() {
+     TORCH_CHECK(false);
+     return DynamicType::Tag::Any;
+   }
+ };
+
+ namespace detail {
+ C10_NOINLINE DynamicTypePtr makeBaseType(DynamicType::Tag tag);
+ }
+
+ #define DYNAMIC_TYPE_TAG_VALUE(NAME, _, IS_BASE_TYPE)      \
+   template <>                                              \
+   struct TORCH_API DynamicTypeTrait<NAME##Type> {          \
+     C10_ERASE static auto tagValue() {                     \
+       return DynamicType::Tag::NAME;                       \
+     }                                                      \
+     static constexpr bool isBaseType = IS_BASE_TYPE;       \
+     template <typename T = const DynamicTypePtr&>          \
+     static std::enable_if_t<isBaseType, T> getBaseType() { \
+       static auto type = detail::makeBaseType(tagValue()); \
+       return type;                                         \
+     }                                                      \
+   }; // namespace c10
+ FORALL_DYNAMIC_TYPES(DYNAMIC_TYPE_TAG_VALUE)
+ FORALL_DYNAMIC_TYPES_FAKE(DYNAMIC_TYPE_TAG_VALUE)
+ #undef DYNAMIC_TYPE_TAG_VALUE
+
+ } // namespace c10
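A small sketch of the bit-subset subtyping rule described in the comment above, using the data-bit constants from this header. The generic test `(a & b) == a` ("every bit of a also appears in b") illustrates the algorithm; it is not the literal implementation, which also has to honor the control bits:

    #include <cassert>
    #include <cstdint>

    int main() {
      using Bits = std::uint32_t;
      constexpr Bits kInt     = 1u << 3;                  // kDynamicIntTypeBit
      constexpr Bits kFloat   = 1u << 4;                  // kDynamicFloatTypeBit
      constexpr Bits kComplex = 1u << 5;                  // kDynamicComplexTypeBit
      constexpr Bits kNumber  = kInt | kFloat | kComplex; // Number's data bits

      auto is_subtype = [](Bits a, Bits b) { return (a & b) == a; };

      assert(is_subtype(kInt, kNumber));  // Int <: Number
      assert(!is_subtype(kNumber, kInt)); // Number is not <: Int
      assert(is_subtype(kInt, kInt));     // equal types subtype each other
      return 0;
    }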
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/jit_type_base.h ADDED
@@ -0,0 +1,719 @@
+ #pragma once
+
+ #include <functional>
+ #include <memory>
+ #include <string>
+ #include <utility>
+
+ #include <ATen/core/qualified_name.h>
+ #include <ATen/core/type_ptr.h>
+ #include <c10/core/SymInt.h>
+ #include <c10/core/SymFloat.h>
+ #include <c10/core/SymBool.h>
+ #include <c10/core/SymIntArrayRef.h>
+ #include <c10/macros/Macros.h>
+ #include <c10/util/ArrayRef.h>
+ #include <c10/util/Exception.h>
+ #include <c10/util/Optional.h>
+
+ namespace c10 {
+
+ #define C10_FORALL_TYPES(_) \
+   _(AnyType)                \
+   _(EnumType)               \
+   _(AnyEnumType)            \
+   _(TensorType)             \
+   _(StorageType)            \
+   _(TupleType)              \
+   _(ListType)               \
+   _(DictType)               \
+   _(NumberType)             \
+   _(FloatType)              \
+   _(ComplexType)            \
+   _(FutureType)             \
+   _(AwaitType)              \
+   _(RRefType)               \
+   _(IntType)                \
+   _(NoneType)               \
+   _(StringType)             \
+   _(GeneratorType)          \
+   _(QuantizerType)          \
+   _(BoolType)               \
+   _(OptionalType)           \
+   _(VarType)                \
+   _(DeviceObjType)          \
+   _(StreamObjType)          \
+   _(FunctionType)           \
+   _(ClassType)              \
+   _(PyObjectType)           \
+   _(CapsuleType)            \
+   _(InterfaceType)          \
+   _(QSchemeType)            \
+   _(ScalarTypeType)         \
+   _(LayoutType)             \
+   _(MemoryFormatType)       \
+   _(AnyListType)            \
+   _(AnyTupleType)           \
+   _(AnyClassType)           \
+   _(SymIntType)             \
+   _(SymFloatType)           \
+   _(SymBoolType)            \
+   _(UnionType)              \
+   _(DynamicType)
+
+ enum class TypeKind {
+ #define DEFINE_TYPE(T) T,
+   C10_FORALL_TYPES(DEFINE_TYPE)
+ #undef DEFINE_TYPE
+ };
+
+ TORCH_API const char* typeKindToString(TypeKind kind);
+
+ struct Type;
+ struct SharedType;
+
+ // Use this to customize how a Type is printed using `annotation_str()`. If
+ // c10::nullopt is returned, `annotation_str()` falls through to its default
+ // implementation.
+ using TypePrinter = std::function<c10::optional<std::string>(const Type&)>;
+
+ namespace detail {
+ template <typename T>
+ struct IsSingletonType : public std::integral_constant<bool, false> {};
+ } // namespace detail
+ #define TORCH_DECLARE_SINGLETON(Type) \
+   struct Type;                        \
+   namespace detail {                  \
+   template <> struct IsSingletonType<Type> : public std::integral_constant<bool, true> {}; \
+   }
+
+ TORCH_DECLARE_SINGLETON(AnyType);
+ TORCH_DECLARE_SINGLETON(AnyEnumType);
+ TORCH_DECLARE_SINGLETON(NumberType);
+ TORCH_DECLARE_SINGLETON(FloatType);
+ TORCH_DECLARE_SINGLETON(ComplexType);
+ TORCH_DECLARE_SINGLETON(IntType);
+ TORCH_DECLARE_SINGLETON(BoolType);
+ TORCH_DECLARE_SINGLETON(StringType);
+ TORCH_DECLARE_SINGLETON(StorageType);
+ TORCH_DECLARE_SINGLETON(NoneType);
+ TORCH_DECLARE_SINGLETON(GeneratorType);
+ TORCH_DECLARE_SINGLETON(QuantizerType);
+ TORCH_DECLARE_SINGLETON(QSchemeType);
+ TORCH_DECLARE_SINGLETON(DeviceObjType);
+ TORCH_DECLARE_SINGLETON(StreamObjType);
+ TORCH_DECLARE_SINGLETON(CapsuleType);
+ TORCH_DECLARE_SINGLETON(PyObjectType);
+ TORCH_DECLARE_SINGLETON(ScalarTypeType);
+ TORCH_DECLARE_SINGLETON(LayoutType);
+ TORCH_DECLARE_SINGLETON(MemoryFormatType);
+ TORCH_DECLARE_SINGLETON(AnyListType);
+ TORCH_DECLARE_SINGLETON(AnyTupleType);
+ TORCH_DECLARE_SINGLETON(AnyClassType);
+
+ namespace detail {
+ template <typename T, typename Enable = void>
+ struct CastReturnType {
+   using type = std::shared_ptr<T>;
+ };
+
+ template <typename T>
+ struct CastReturnType<T, typename std::enable_if<IsSingletonType<T>::value>::type> {
+   using type = SingletonTypePtr<T>;
+ };
+
+ template <typename T, typename Enable = void>
+ struct CastConstReturnType {
+   using type = std::shared_ptr<const T>;
+ };
+
+ template <typename T>
+ struct CastConstReturnType<T, typename std::enable_if<IsSingletonType<T>::value>::type> {
+   using type = SingletonTypePtr<const T>;
+ };
+
+ template <typename T>
+ struct as_shared_type {
+   using type = SharedType*;
+ };
+
+ template <typename T>
+ struct as_shared_type<const T*> {
+   using type = const SharedType *;
+ };
+ } // namespace detail
+
+ struct TORCH_API Type {
+   friend TORCH_API bool operator==(const Type& lhs, const Type& rhs);
+  private:
+   TypeKind kind_;
+
+  protected:
+   Type(TypeKind kind) : kind_(kind) {}
+
+   Type(const Type&) = default;
+   Type& operator=(const Type&) = default;
+   Type(Type&&) noexcept = default;
+   Type& operator=(Type&&) noexcept = default;
+
+   virtual std::string annotation_str_impl(TypePrinter /*printer*/) const {
+     return str();
+   }
+   // a == b
+   virtual bool equals(const Type& rhs) const = 0;
+   // a == b <=> b == a
+   virtual bool symmetric() const {
+     return true;
+   }
+
+  public:
+   template <typename T>
+   class SingletonOrSharedTypePtr {
+    public:
+     using element_type = typename std::shared_ptr<T>::element_type;
+
+     SingletonOrSharedTypePtr() = default;
+
+     /* implicit */ SingletonOrSharedTypePtr(std::shared_ptr<T> x)
+         : repr_(std::move(x)) {}
+
+     template <typename U, std::enable_if_t<std::is_convertible<U*, T*>::value, bool> = true>
+     /* implicit */ SingletonOrSharedTypePtr(std::shared_ptr<U> x)
+         : repr_(std::move(x)) {}
+
+     /* implicit */ SingletonOrSharedTypePtr(std::nullptr_t)
+         : repr_(nullptr) {}
+
+     /* implicit */ SingletonOrSharedTypePtr(SingletonTypePtr<T> p)
+         : repr_(p) {}
+
+     template <typename U, std::enable_if_t<std::is_convertible<U*, T*>::value, bool> = true>
+     /* implicit */ SingletonOrSharedTypePtr(SingletonTypePtr<U> p)
+         : repr_(SingletonTypePtr<T>(p.get())) {}
+
+
+     // We need to support construction from T* for pybind. The problem
+     // is that it's not clear if we are supposed to be taking shared
+     // ownership or not.
+     //
+     // Case 1: if T is known statically to derive from SharedType, we should use
+     // shared_from_this() and take shared ownership.
+     //
+     // Case 2: if T is exactly Type, we need to do a dynamic_cast to
+     // check if it's a SharedType and do the right thing.
+     //
+     // Case 3: Otherwise, T is not a SharedType. (debug-check this
+     // assumption!) Use a singleton pointer.
+
+     template <typename U = T, std::enable_if_t<std::is_base_of<SharedType, U>::value, bool> = true>
+     /* implicit */ SingletonOrSharedTypePtr(T* p) : SingletonOrSharedTypePtr(static_cast<typename detail::as_shared_type<U>::type>(p)->shared_from_this()) {}
+
+     template <typename U = T, std::enable_if_t<std::is_same<Type, U>::value, bool> = true>
+     /* implicit */ SingletonOrSharedTypePtr(T* p) {
+       if (auto* shared_p = dynamic_cast<typename detail::as_shared_type<U>::type>(p)) {
+         repr_ = Repr(shared_p->shared_from_this());
+       } else {
+         repr_ = Repr(p);
+       }
+     }
+
+     template <typename U = T, std::enable_if_t<!std::is_same<Type, U>::value && !std::is_base_of<SharedType, U>::value, bool> = true>
+     /* implicit */ SingletonOrSharedTypePtr(T* p)
+         : repr_(p) {
+       TORCH_INTERNAL_ASSERT_DEBUG_ONLY(dynamic_cast<typename detail::as_shared_type<U>::type>(p) == nullptr);
+     }
+
+     SingletonOrSharedTypePtr(const SingletonOrSharedTypePtr&) = default;
+     SingletonOrSharedTypePtr(SingletonOrSharedTypePtr&&) noexcept = default;
+     SingletonOrSharedTypePtr& operator=(const SingletonOrSharedTypePtr&) = default;
+     SingletonOrSharedTypePtr& operator=(SingletonOrSharedTypePtr&&) noexcept = default;
+
+     T* get() const {
+       return repr_.isSharedAndNonNull() ? repr_.shared_.repr_.get() : static_cast<T*>(repr_.rawRepr().first);
+     }
+
+     operator bool() const {
+       return repr_.isNonNull();
+     }
+
+     bool operator==(std::nullptr_t) const {
+       return !repr_.isNonNull();
+     }
+
+     bool operator!=(std::nullptr_t) const {
+       return repr_.isNonNull();
+     }
+
+     template <typename U = T, std::enable_if_t<!std::is_same<std::remove_const_t<U>, void>::value, bool> = true>
+     U& operator*() const {
+       return *get();
+     }
+
+     T* operator->() const {
+       return get();
+     }
+
+    private:
+     // NOTE: SharedPtrWrapper exists to work around a baffling bug in
+     // nvcc; see comment in destroy() below.
+     struct SharedPtrWrapper {
+       SharedPtrWrapper(std::shared_ptr<T> &&x)
+           : repr_(std::move(x)) {}
+       std::shared_ptr<T> repr_;
+     };
+     union Repr {
+       Repr() : Repr(nullptr) {}
+
+       explicit Repr(std::shared_ptr<T> x)
+           : shared_(std::move(x)) {}
+
+       explicit Repr(std::nullptr_t)
+           : singletonRepr_(nullptr) {}
+
+       explicit Repr(SingletonTypePtr<T> p)
+           : singletonRepr_(p.get()) {}
+
+       ~Repr() {
+         destroy();
+       }
+
+       // NOTE: the only non-UB way to access our null state is through
+       // rawRepr(), because our copy operation doesn't preserve which
+       // union member is active for null pointers.
+       Repr(const Repr& rhs) {
+         if (rhs.isSharedAndNonNull()) {
+           new (&shared_) SharedPtrWrapper(rhs.shared_);
+         } else {
+           singletonRepr_.singleton_ = static_cast<T*>(rhs.rawRepr().first);
+           TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.singletonRepr_.unused_ == nullptr);
+           singletonRepr_.unused_ = nullptr;
+         }
+       }
+
+       Repr(Repr&& rhs) noexcept {
+         if (rhs.isSharedAndNonNull()) {
+           new (&shared_) SharedPtrWrapper(std::move(rhs.shared_));
+         } else {
+           singletonRepr_.singleton_ = static_cast<T*>(rhs.rawRepr().first);
+           TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.singletonRepr_.unused_ == nullptr);
+           singletonRepr_.unused_ = nullptr;
+         }
+       }
+
+       Repr& operator=(const Repr& rhs) {
+         if (&rhs == this) {
+           return *this;
+         }
+         if (rhs.isSharedAndNonNull()) {
+           if (isSharedAndNonNull()) {
+             shared_ = rhs.shared_;
+           } else {
+             new (&shared_) SharedPtrWrapper(rhs.shared_);
+           }
+         } else {
+           if (isSharedAndNonNull()) {
+             destroy();
+           }
+           singletonRepr_.singleton_ = static_cast<T*>(rhs.rawRepr().first);
+           TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.rawRepr().nullIfSingleton_ == nullptr);
+           singletonRepr_.unused_ = nullptr;
+         }
+         return *this;
+       }
+
+       Repr& operator=(Repr&& rhs) noexcept {
+         if (&rhs == this) {
+           return *this;
+         }
+         if (rhs.isSharedAndNonNull()) {
+           if (isSharedAndNonNull()) {
+             shared_ = std::move(rhs.shared_);
+           } else {
+             new (&shared_) SharedPtrWrapper(std::move(rhs.shared_));
+           }
+         } else {
+           if (isSharedAndNonNull()) {
+             destroy();
+           }
+           singletonRepr_.singleton_ = static_cast<T*>(rhs.rawRepr().first);
+           TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.rawRepr().nullIfSingleton_ == nullptr);
+           singletonRepr_.unused_ = nullptr;
+         }
+         return *this;
+       }
+
+       SharedPtrWrapper shared_;
+
+       struct SingletonRepr {
+         explicit SingletonRepr(T* s) : singleton_(s) {}
+         T* singleton_;
+         void* unused_ = nullptr;
+       } singletonRepr_;
+       struct RawRepr {
+         void* first;
+         void* nullIfSingleton_;
+       };
+
+       // It is UB to read the singleton part of Repr if it was
+       // constructed as a shared_ptr and vice versa, but memcpying out
+       // the representation is always OK, so here's an accessor to obey
+       // the letter of the law.
+       RawRepr rawRepr() const {
+         RawRepr repr{};
+         memcpy(&repr, reinterpret_cast<const char *>(this), sizeof(RawRepr));
+         return repr;
+       }
+
+       bool isNonNull() const {
+         auto repr = rawRepr();
+         TORCH_INTERNAL_ASSERT_DEBUG_ONLY(repr.nullIfSingleton_ == nullptr || repr.first != nullptr);
+         return repr.first != nullptr;
+       }
+
+       bool isSharedAndNonNull() const {
+         return rawRepr().nullIfSingleton_ != nullptr;
+       }
+
+      private:
+       void destroy() {
+         if (isSharedAndNonNull()) {
+           // Without SharedPtrWrapper, this line would read
+           // `shared_.~shared_ptr()` and nvcc would complain with
+           // "error: expected primary-expression before '>' token"
+           // referring to the "t" in "shared_ptr". SharedPtrWrapper
+           // exists to work around this compiler bug.
+           shared_.~SharedPtrWrapper();
+         }
+       }
+     } repr_;
+   };
+
+   using TypePtr = SingletonOrSharedTypePtr<Type>;
+   using Ptr = TypePtr;
+   using ElementType = Type;
+
+   // subtyping relation. By default, we return true for the case
+   // when the type is exactly equal or if this <: T where rhs = Optional[T]
+
+   // if this returns false and the why_not stream is non-null, it contains
+   // additional details that describe why this is not a subtype of 'rhs'.
+   // This additional information should only contain details that are not
+   // obvious from the annotation_str() that describes the type. For instance it
+   // is clear that `int <: str` is false but not clear why `Foo <: InterfaceBar`
+   // might be false.
+   virtual bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const;
+   virtual bool is_module() const;
+   bool isSubtypeOf(const Type& rhs) const {
+     return isSubtypeOfExt(rhs, nullptr);
+   }
+   // Compatibility shims to accommodate existing code that passes shared_ptrs
+   // around. Ideally, we would just delete this, but it should be harmless.
+   template <typename T>
+   typename std::enable_if<std::is_base_of<Type, T>::value, bool>::type
+   isSubtypeOf(const std::shared_ptr<T>& rhs) const {
+     return isSubtypeOf(*rhs);
+   }
+
+   template <typename T>
+   typename std::enable_if<std::is_base_of<Type, T>::value, bool>::type
+   isSubtypeOf(const SingletonOrSharedTypePtr<T>& rhs) const {
+     return isSubtypeOf(*rhs);
+   }
+
+   template <typename T>
+   typename std::enable_if<std::is_base_of<Type, T>::value, bool>::type
+   isSubtypeOf(SingletonTypePtr<T> rhs) const {
+     return isSubtypeOf(*rhs);
+   }
+
+   template <typename T>
+   typename std::enable_if<std::is_base_of<Type, T>::value, bool>::type
+   isSubtypeOfExt(const SingletonOrSharedTypePtr<T>& rhs, std::ostream* why_not) const {
+     return isSubtypeOfExt(*rhs, why_not);
+   }
+
+   template <typename T>
+   typename std::enable_if<std::is_base_of<Type, T>::value, bool>::type
+   isSubtypeOfExt(const std::shared_ptr<T>& rhs, std::ostream* why_not) const {
+     return isSubtypeOfExt(*rhs, why_not);
+   }
+
+   template <typename T>
+   typename std::enable_if<std::is_base_of<Type, T>::value, bool>::type
+   isSubtypeOfExt(SingletonTypePtr<T> rhs, std::ostream* why_not) const {
+     return isSubtypeOfExt(*rhs, why_not);
+   }
+
+   // How this type will appear in FunctionSchema declarations
+   virtual std::string str() const = 0;
+
+   // How this type will appear as if it were a type annotation in Python,
+   // which is sometimes different than how it appears in declarations (e.g.
+   // int[] vs List[int])
+   //
+   // Takes a custom printer that users can pass in to customize the output of
+   // this method.
+   std::string annotation_str(TypePrinter printer) const {
+     if (printer) {
+       // the printer can return nullopt to fall through to the default impl
+       if (auto renamed = printer(*this)) {
+         return *renamed;
+       }
+     }
+     return annotation_str_impl(std::move(printer));
+   }
+   std::string annotation_str() const {
+     // Overload instead of defining a default value for `printer` to help
+     // debuggers out.
+     return annotation_str(nullptr);
+   }
+
+   // Returns a human-readable string that includes additional information like
+   // "type is inferred rather than explicitly defined" to help construct more
+   // user-friendly messages.
+   virtual std::string repr_str() const {
+     return annotation_str();
+   }
+
+   TypeKind kind() const {
+     return kind_;
+   }
+
+   virtual bool isUnionType() const {
+     return false;
+   }
+
+   virtual bool requires_grad() const {
+     for (const auto& ct : containedTypes()) {
+       if (ct->requires_grad()) {
+         return true;
+       }
+     }
+     return false;
+   }
+
+   // Dynamically cast this object to the subclass indicated by the
+   // template variable, returning nullptr if the cast is invalid.
+   template <typename T, std::enable_if_t<!detail::IsSingletonType<T>::value, bool> = true>
+   typename detail::CastReturnType<T>::type cast() {
+     if (T::Kind == kind()) {
+       return std::static_pointer_cast<T>(static_cast<T*>(this)->shared_from_this());
+     }
+     return nullptr;
+   }
+   template <typename T, std::enable_if_t<detail::IsSingletonType<T>::value, bool> = true>
+   typename detail::CastReturnType<T>::type cast() {
+     if (T::Kind == kind()) {
+       TORCH_INTERNAL_ASSERT_DEBUG_ONLY(this == T::get().get());
+       return typename detail::CastReturnType<T>::type(static_cast<T*>(this));
+     }
+     return nullptr;
+   }
+   template <typename T, std::enable_if_t<!detail::IsSingletonType<T>::value, bool> = true>
+   typename detail::CastConstReturnType<T>::type cast() const {
+     if (T::Kind == kind()) {
+       return std::static_pointer_cast<const T>(static_cast<const T*>(this)->shared_from_this());
+     }
+     return nullptr;
+   }
+   template <typename T, std::enable_if_t<detail::IsSingletonType<T>::value, bool> = true>
+   typename detail::CastConstReturnType<T>::type cast() const {
+     if (T::Kind == kind()) {
+       TORCH_INTERNAL_ASSERT_DEBUG_ONLY(this == T::get().get());
+       return typename detail::CastConstReturnType<T>::type(static_cast<const T*>(this));
+     }
+     return nullptr;
+   }
+   template <typename T>
+   T* castRaw() {
+     if (T::Kind == kind()) {
+       return static_cast<T*>(this);
+     }
+     return nullptr;
+   }
+   template <typename T>
+   const T* castRaw() const {
+     if (T::Kind == kind()) {
+       return static_cast<const T*>(this);
+     }
+     return nullptr;
+   }
+   template <typename T>
+   auto expect() {
+     auto r = cast<T>();
+     AT_ASSERT(r);
+     return r;
+   }
+   template <typename T>
+   auto expect() const {
+     auto r = cast<const T>();
+     AT_ASSERT(r);
+     return r;
+   }
+   template <typename T>
+   T& expectRef() {
+     auto* r = castRaw<T>();
+     AT_ASSERT(r);
+     return *r;
+   }
+   template <typename T>
+   const T& expectRef() const {
+     auto* r = castRaw<const T>();
+     AT_ASSERT(r);
+     return *r;
+   }
+   virtual ~Type() = default;
+   virtual bool hasFreeVariables() const {
+     return false;
+   }
+   // list of types this type contains, e.g. for a List, the element type of
+   // the list; for a Tuple, the types of the tuple elements
+   virtual at::ArrayRef<TypePtr> containedTypes() const {
+     return {};
+   }
+   virtual TypePtr containedType(size_t i) const {
+     return containedTypes().at(i);
+   }
+   virtual size_t containedTypeSize() const {
+     return containedTypes().size();
+   }
+   // create a new version of this type, replacing its contained types with
+   // contained_types
+   TypePtr withContained(std::vector<TypePtr> contained_types);
+   // per-type constructor, you only need to override this if the
+   // containedTypes() is not empty
+   virtual TypePtr createWithContained(
+       std::vector<TypePtr> /*contained_types*/) const {
+     AT_ERROR(
+         "type with contained types did not overload createWithContained: ",
+         str());
+   }
+
+ };
+
+ template <typename T>
+ using SingletonOrSharedTypePtr = Type::SingletonOrSharedTypePtr<T>;
+
+
+ template <typename T, typename U>
+ bool operator==(const SingletonOrSharedTypePtr<T>& x, const SingletonOrSharedTypePtr<U>& y) {
+   return (void*)x.get() == (void*)y.get();
+ }
+
+ template <typename T, typename U>
+ bool operator==(const SingletonOrSharedTypePtr<T>& x, const std::shared_ptr<U>& y) {
+   return (void*)x.get() == (void*)y.get();
+ }
+
+ template <typename T, typename U>
+ bool operator==(const std::shared_ptr<T>& x, const SingletonOrSharedTypePtr<U>& y) {
+   return (void*)x.get() == (void*)y.get();
+ }
+
+ template <typename T, typename U>
+ bool operator==(const SingletonOrSharedTypePtr<T>& x, const SingletonTypePtr<U>& y) {
+   return (void*)x.get() == (void*)y.get();
+ }
+
+ template <typename T, typename U>
+ bool operator==(const SingletonTypePtr<T>& x, const SingletonOrSharedTypePtr<U>& y) {
+   return (void*)x.get() == (void*)y.get();
+ }
+
+ template <typename T, typename U>
+ bool operator!=(const SingletonOrSharedTypePtr<T>& x, const SingletonOrSharedTypePtr<U>& y) {
+   return !(x == y);
+ }
+
+ template <typename T, typename U>
+ bool operator!=(const SingletonOrSharedTypePtr<T>& x, const std::shared_ptr<U>& y) {
+   return !(x == y);
+ }
+
+ template <typename T, typename U>
+ bool operator!=(const std::shared_ptr<T>& x, const SingletonOrSharedTypePtr<U>& y) {
+   return !(x == y);
+ }
+
+ template <typename T, typename U>
+ bool operator!=(const SingletonOrSharedTypePtr<T>& x, const SingletonTypePtr<U>& y) {
+   return !(x == y);
+ }
+
+ template <typename T, typename U>
+ bool operator!=(const SingletonTypePtr<T>& x, const SingletonOrSharedTypePtr<U>& y) {
+   return !(x == y);
+ }
+
+ using TypePtr = SingletonOrSharedTypePtr<Type>;
+ using ConstTypePtr = SingletonOrSharedTypePtr<const Type>;
+
+ // Explicitly enable MaybeOwned<shared_ptr<T>>, rather than allowing
+ // MaybeOwned to be used for any type right away.
+ template <typename T>
+ struct MaybeOwnedTraits<SingletonOrSharedTypePtr<T>>
+     : public MaybeOwnedTraitsGenericImpl<SingletonOrSharedTypePtr<T>> {};
+
+ // Base class for Types that are guaranteed to be owned by std::shared_ptr.
+ struct TORCH_API SharedType : public Type, public std::enable_shared_from_this<SharedType> {
+   using Type::Type;
+ };
+
+ inline TypePtr Type::withContained(std::vector<TypePtr> contained_types) {
+   auto current_contained = containedTypes();
+   // Types with no contained_types don't need this call. Check before calling!
+   //
+   // (We can't support this efficiently because types without
+   // contained types may be singletons, in which case
+   // shared_from_this will crash; we would have to provide a virtual
+   // typeptr_from_this or isSingleton.)
+   TORCH_INTERNAL_ASSERT(!current_contained.empty() && current_contained.size() == contained_types.size());
+   if (current_contained.equals(contained_types)) {
+     return std::static_pointer_cast<Type>(static_cast<SharedType *>(this)->shared_from_this());
+   }
+   return createWithContained(std::move(contained_types));
+ }
+
+
+ TORCH_API inline bool operator==(const Type& lhs, const Type& rhs) {
+   if (C10_UNLIKELY(!rhs.symmetric())) {
+     return rhs.equals(lhs);
+   }
+   return lhs.equals(rhs);
+ }
+
+ struct NamedType;
+ using NamedTypePtr = std::shared_ptr<NamedType>;
+ using ConstNamedTypePtr = std::shared_ptr<const NamedType>;
+
+ struct TORCH_API NamedType : public SharedType {
+   NamedType(TypeKind tk, c10::optional<QualifiedName> name)
+       : SharedType(tk), name_(std::move(name)) {
+     TORCH_INTERNAL_ASSERT(
+         tk == TypeKind::TupleType || tk == TypeKind::FunctionType ||
+         tk == TypeKind::ClassType || tk == TypeKind::InterfaceType ||
+         tk == TypeKind::EnumType,
+         "If you add a new kind of NamedType, ",
+         "please update the cast<NamedType> specialization and this assert");
+   }
+
+   // Fully qualified name of type
+   // Looks like: "foo.bar.Baz".
+   const c10::optional<QualifiedName>& name() const {
+     return name_;
+   }
+
+  private:
+   c10::optional<QualifiedName> name_;
+ };
+
+ } // namespace c10
+
+ namespace std {
+ template <typename T>
+ struct hash<c10::SingletonOrSharedTypePtr<T>> {
+   size_t operator()(const c10::SingletonOrSharedTypePtr<T>& x) const {
+     return std::hash<T*>()(x.get());
+   }
+ };
+ } // namespace std
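A brief sketch of how the `cast`/`castRaw` helpers defined above are typically used on a `TypePtr`. `describe` is a hypothetical function, and it assumes the full JIT type headers (e.g. ATen/core/jit_type.h) for `ListType`:

    #include <ATen/core/jit_type.h>
    #include <iostream>

    // Hypothetical helper: down-cast a generic TypePtr when the kind matches.
    void describe(const c10::TypePtr& t) {
      if (auto list = t->cast<c10::ListType>()) { // nullptr if not a ListType
        std::cout << "list of " << list->getElementType()->str() << '\n';
      } else {
        std::cout << t->str() << '\n';            // FunctionSchema-style form
      }
    }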
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/adaption.h ADDED
@@ -0,0 +1,83 @@
+ #pragma once
+
+ #include <ATen/Tensor.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/core/List.h>
+ #include <c10/core/TensorOptions.h>
+
+ /*
+  * [Note: hacky wrapper removal for optional tensor]
+  *
+  * The kernel implementation takes an optional tensor marked in the schema as
+  * Tensor? but the C++ function takes Tensor instead of the optional<Tensor>
+  * expected by the dispatcher.
+  *
+  * To remove the hacky wrapper, the C++ function is changed to take
+  * optional<Tensor> and unwrap the Tensor value at the beginning of
+  * the function, e.g.:
+  *   > c10::MaybeOwned<Tensor> weight_maybe_owned =
+  *   >     at::borrow_from_optional_tensor(weight_opt);
+  *   > const Tensor& weight = *weight_maybe_owned;
+  *
+  * We may want to make the kernel handle optional directly without
+  * going through the creation of a default-constructed Tensor in
+  * at::borrow_from_optional_tensor.
+  */
+
+ /*
+  * [Note: hacky wrapper removal for TensorOptions]
+  *
+  * The kernel implementation takes a TensorOptions argument but the dispatcher
+  * expects separate arguments for dtype, layout, device, pin_memory.
+  *
+  * To remove the hacky wrapper, the kernel implementation is changed to take
+  * the 4 arguments (dtype, layout, device, pin_memory), and assemble the
+  * TensorOptions value at the beginning of the function, e.g.:
+  *   > TensorOptions options = TensorOptions().dtype(dtype).layout(layout)
+  *   >     .device(device).pinned_memory(pin_memory);
+  *
+  * We may want to make the kernel handle these parameters directly without
+  * going through the creation of a TensorOptions value.
+  */
+
+ namespace c10 {
+ namespace impl {
+
+ TORCH_API void common_device_check_failure(Device common_device, const at::Tensor& tensor, at::CheckedFrom methodName, at::CheckedFrom argName);
+
+ inline void check_and_update_common_device(optional<Device>& common_device, const at::Tensor& tensor, at::CheckedFrom methodName, at::CheckedFrom argName) {
+   // TODO: Remove this once the following issue is addressed:
+   // https://github.com/pytorch/pytorch/issues/57380
+   if (!tensor.defined()) {
+     return;
+   }
+
+   if (!common_device.has_value()) {
+     common_device = tensor.device();
+     return;
+   }
+
+   if (C10_UNLIKELY(common_device != tensor.device())) {
+     common_device_check_failure(*common_device, tensor, methodName, argName);
+   }
+ }
+
+ inline void check_and_update_common_device(optional<Device>& common_device, const optional<at::Tensor>& tensor, at::CheckedFrom methodName, at::CheckedFrom argName) {
+   if (tensor.has_value()) {
+     check_and_update_common_device(common_device, tensor.value(), methodName, argName);
+   }
+ }
+
+ inline void check_and_update_common_device(optional<Device>& common_device, at::ITensorListRef tensors, at::CheckedFrom methodName, at::CheckedFrom argName) {
+   for (const auto& tensor : tensors) {
+     check_and_update_common_device(common_device, tensor, methodName, argName);
+   }
+ }
+
+ inline void check_and_update_common_device(optional<Device>& common_device, const List<optional<at::Tensor>>& tensors, at::CheckedFrom methodName, at::CheckedFrom argName) {
+   for (const auto& tensor : tensors) {
+     check_and_update_common_device(common_device, tensor, methodName, argName);
+   }
+ }
+ } // namespace impl
+ } // namespace c10
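A hedged sketch of how a kernel wrapper can use these helpers to enforce that all arguments live on one device; `my_op_check` and the argument names are illustrative, not generated code:

    #include <ATen/core/op_registration/adaption.h>

    // Illustrative only: threads one optional<Device> through every argument;
    // the first defined tensor seeds it, later mismatches raise an error.
    void my_op_check(const at::Tensor& self,
                     const c10::optional<at::Tensor>& weight) {
      c10::optional<c10::Device> common_device = c10::nullopt;
      c10::impl::check_and_update_common_device(common_device, self, "my_op", "self");
      c10::impl::check_and_update_common_device(common_device, weight, "my_op", "weight");
      // On mismatch, common_device_check_failure produces the descriptive error.
    }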
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_allowlist.h ADDED
@@ -0,0 +1,199 @@
+ #pragma once
+
+ // TODO: unify to C10_MOBILE. In theory this header could be used in OSS.
+ #ifdef TEMPLATE_SELECTIVE_BUILD
+ #include <ATen/selected_mobile_ops.h>
+ #endif
+
+ /**
+  * This header implements functionality to build PyTorch with only a certain
+  * set of operators (+ dependencies) included.
+  *
+  * - Build with -DTORCH_OPERATOR_WHITELIST="aten::add;aten::sub" and only these
+  *   two ops will be included in your build. The allowlist records operators
+  *   only, no overloads; if you include aten::add, all overloads of aten::add
+  *   will be included.
+  *
+  * Internally, this is done by removing the operator registration calls
+  * using compile time programming, and the linker will then prune all
+  * operator functions that weren't registered.
+  * See Note [Selective build] for more details.
+  *
+  * WARNING: The allowlist mechanism doesn't work for all ways you could go about
+  * registering an operator. If the dispatch key / operator name is not
+  * sufficiently obvious at compile time, then the allowlisting mechanism
+  * will fail (and the operator will be included in the binary anyway).
+  */
+
+ #include <c10/util/string_view.h>
+ #include <c10/core/DispatchKey.h>
+ #include <c10/macros/Macros.h>
+
+
+ #if defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE)
+ #include <ATen/record_function.h>
+ #endif
+
+ namespace c10 {
+
+ namespace impl {
+
+ constexpr bool allowlist_contains(string_view allowlist, string_view item); // Forward Declare
+
+ /**
+  * In selective build mode, returns true/false depending on whether a build
+  * feature is available or not.
+  *
+  * In instrumenting mode (tracing mode), always returns true, and doesn't
+  * trigger any side effects.
+  */
+ constexpr bool is_build_feature_available(const char* name) {
+ #if !defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE)
+   // Selective build mode.
+ #if !defined(TORCH_BUILD_FEATURE_ALLOWLIST)
+   (void)name;
+   return true;
+ #else
+   return allowlist_contains(
+       C10_STRINGIZE(TORCH_BUILD_FEATURE_ALLOWLIST),
+       name);
+ #endif
+
+ #else
+   // Instrumenting (tracing) mode.
+   (void)name;
+   return true;
+ #endif
+ }
+
+ [[noreturn]] void build_feature_required_feature_not_available(const char* feature);
+
+ /**
+  * Use the BUILD_FEATURE_REQUIRED macro in user code.
+  *
+  * In selective build mode, it becomes a no-op if the build feature passed
+  * in is available. If not available, it throws an exception (c10::Error).
+  * The compiler is able to perform dead code elimination for code
+  * following this macro if the build feature is not available.
+  *
+  * In instrumenting mode (tracing mode), it registers (as a side effect)
+  * the presence of this specific build feature being triggered.
+  */
+ #if !defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE) // selective build mode
+
+ #if defined(TORCH_BUILD_FEATURE_ALLOWLIST)
+ #define BUILD_FEATURE_REQUIRED(NAME)                                 \
+   if (!c10::impl::is_build_feature_available(NAME)) {                \
+     ::c10::impl::build_feature_required_feature_not_available(NAME); \
+   }
+ #else // Everything trivially selected
+ #define BUILD_FEATURE_REQUIRED(NAME)
+
+ #endif
+
+ #else // trace mode
+ #define BUILD_FEATURE_REQUIRED(NAME)  \
+   RECORD_FUNCTION_WITH_SCOPE(         \
+       at::RecordScope::BUILD_FEATURE, \
+       std::string(NAME),              \
+       {});
+ #endif
+
+ // Use this macro, and not is_build_feature_available directly.
+ #define BUILD_FEATURE_AVAILABLE(NAME) ::c10::impl::is_build_feature_available(NAME)
+
+ // Returns true iff allowlist contains item
+ // allowlist_contains("a;bc;d", "bc") == true
+ constexpr bool allowlist_contains(string_view allowlist, string_view item) {
+   // Choose a really big value for next so that if something goes wrong
+   // this code will blow up in a hopefully detectable way.
+   size_t next = std::numeric_limits<size_t>::max();
+   for (size_t cur = 0; cur <= allowlist.size(); cur = next) {
+     next = allowlist.find(';', cur);
+     if (next != string_view::npos) {
+       if (allowlist.substr(cur, next - cur).compare(item) == 0) {
+         return true;
+       }
+       next++;
+     } else {
+       if (allowlist.substr(cur).compare(item) == 0) {
+         return true;
+       }
+       break;
+     }
+   }
+   return false;
+ }
+
+ // Returns true iff the given op name is on the allowlist
+ // and should be registered
+ constexpr bool op_allowlist_check(string_view op_name) {
+   assert(op_name.find("::") != string_view::npos);
+   // Use assert() instead of throw() due to a gcc bug. See:
+   // https://stackoverflow.com/questions/34280729/throw-in-constexpr-function
+   // https://github.com/fmtlib/fmt/issues/682
+   assert(op_name.find("(") == string_view::npos);
+ #if !defined(TORCH_OPERATOR_WHITELIST)
+   // If the TORCH_OPERATOR_WHITELIST parameter is not defined,
+   // all ops are to be registered
+   return true;
+ #else
+   return allowlist_contains(
+       C10_STRINGIZE(TORCH_OPERATOR_WHITELIST),
+       // This function is mainly used for mobile selective build with
+       // root operators, where the overload is included in the allowlist.
+       op_name);
+   // // Strip overload name (as allowlist doesn't contain overloads)
+   // // Another function based on this may be added when there's usage
+   // // on op names without overload.
+   // OperatorNameView::parse(op_name).name);
+ #endif
+ }
+
+ // Returns true iff the given schema string is on the allowlist
+ // and should be registered
+ constexpr bool schema_allowlist_check(string_view schema) {
+ #if defined(TORCH_FORCE_SCHEMA_REGISTRATION)
+   return true;
+ #else
+   return op_allowlist_check(schema.substr(0, schema.find("(")));
+ #endif
+ }
+
+ // Returns true iff the given custom class name is on the allowlist
+ // and should be registered
+ constexpr bool custom_class_allowlist_check(string_view custom_class_name) {
+ #if !defined(TORCH_CUSTOM_CLASS_ALLOWLIST)
+   // If the TORCH_CUSTOM_CLASS_ALLOWLIST parameter is not defined,
+   // all custom classes are to be registered
+   (void)custom_class_name;
+   return true;
+ #else
+   return allowlist_contains(
+       C10_STRINGIZE(TORCH_CUSTOM_CLASS_ALLOWLIST),
+       custom_class_name);
+ #endif
+ }
+
+ // schema_allowlist_check() implicitly depends on a macro, TORCH_OPERATOR_WHITELIST.
+ // This API exists so that callers can pass an arbitrary allowlist instead.
+ constexpr bool op_allowlist_contains_name_in_schema(string_view allowlist, string_view schema) {
+   return allowlist_contains(allowlist, schema.substr(0, schema.find("(")));
+ }
+
+ // Returns true iff the given dispatch key is on the allowlist
+ // and should be registered. When we turn this on, the list of valid
+ // mobile dispatch keys is hard coded (but you need to make sure
+ // that you have the correct set of dispatch keys for this).
+ constexpr bool dispatch_key_allowlist_check(DispatchKey /*k*/) {
+ #ifdef C10_MOBILE
+   return true;
+   // Disabled for now: to be enabled later!
+   // return k == DispatchKey::CPU || k == DispatchKey::Vulkan || k == DispatchKey::QuantizedCPU || k == DispatchKey::BackendSelect || k == DispatchKey::CatchAll;
+ #else
+   return true;
+ #endif
+ }
+
+ } // namespace impl
+ } // namespace c10
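
Since allowlist_contains is constexpr, its semicolon-delimited matching can be exercised entirely at compile time. Below is a minimal standalone sketch of the same parsing semantics using std::string_view; this is an illustrative re-implementation for demonstration, not the c10 function itself:

#include <string_view>

// Same idea as c10::impl::allowlist_contains: scan ";"-separated entries
// and compare each one against the queried item.
constexpr bool contains(std::string_view allowlist, std::string_view item) {
  size_t cur = 0;
  while (true) {
    size_t next = allowlist.find(';', cur);
    if (next == std::string_view::npos) {
      return allowlist.substr(cur) == item;  // last (or only) entry
    }
    if (allowlist.substr(cur, next - cur) == item) {
      return true;
    }
    cur = next + 1;  // skip past the ';'
  }
}

// Checked at compile time, mirroring -DTORCH_OPERATOR_WHITELIST="aten::add;aten::sub".
static_assert(contains("aten::add;aten::sub", "aten::sub"));
static_assert(!contains("aten::add;aten::sub", "aten::mul"));

int main() {}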
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/rref_interface.h ADDED
@@ -0,0 +1,40 @@
+ #pragma once
+
+ #include <c10/util/intrusive_ptr.h>
+ #include <ATen/core/type_ptr.h>
+
+ namespace c10 {
+
+ struct Type;
+ using worker_id_t = int16_t;
+
+ // This abstract class contains only user-facing APIs, and will be shared
+ // between jit and distributed to implement TorchScript support.
+ class C10_EXPORT RRefInterface : public c10::intrusive_ptr_target {
+  public:
+   RRefInterface() = default;
+   // RRef is made neither copyable nor movable, to prevent messing up the
+   // reference counting.
+   RRefInterface(const RRefInterface& other) = delete;
+   RRefInterface(RRefInterface&& other) = delete;
+   RRefInterface& operator=(RRefInterface&& other) = delete;
+
+   ~RRefInterface() override = default;
+
+   // Returns the worker id of the owner
+   virtual worker_id_t owner() const = 0;
+
+   // Returns the worker name of the owner
+   virtual std::string ownerName() const = 0;
+
+   // Returns true if this is the ``OwnerRRef``
+   virtual bool isOwner() const = 0;
+
+   // Returns true if this is an ``OwnerRRef`` or if this ``UserRRef`` has been
+   // confirmed by its owner.
+   virtual bool confirmedByOwner() const = 0;
+
+   virtual const TypePtr type() const = 0;
+ };
+
+ } // namespace c10
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/DeviceThreadHandles.h ADDED
@@ -0,0 +1,151 @@
+ // Some stateful GPU libraries, such as cuDNN and cuBLAS, use handles to store state.
+ // These handles are tied to a device, and these libraries require/recommend not
+ // sharing handles across host threads.
+ //
+ // These libraries recommend using one handle per host thread. We may not want to do
+ // this because threads are relatively light-weight, while creating and destroying
+ // handles is expensive (destroying a handle causes synchronizations). DataParallel,
+ // for example, creates new threads for each forward pass.
+ //
+ // This file implements a handle pool mechanism. The handle pool returns handles on
+ // demand as threads request them. If all existing handles in the pool are in use,
+ // it creates a new one. As threads terminate, they release handles back into the pool.
+ // In this way, the handle pool never creates more handles than the high-water mark of
+ // active threads, so it's efficient with DataParallel.
+
+ #pragma once
+
+ #include <unordered_map>
+ #include <vector>
+ #include <utility>
+ #include <mutex>
+ #include <memory>
+
+ #include <c10/util/Exception.h>
+
+ namespace at::cuda { namespace {
+
+ template <typename Handle_t, void Create(Handle_t *), void Destroy(Handle_t)>
+ struct DeviceThreadHandlePool : public std::enable_shared_from_this<DeviceThreadHandlePool<Handle_t, Create, Destroy>> {
+
+   struct Handle {
+     Handle_t handle;
+     Handle(bool create = false) : handle(nullptr)
+     {
+       if(create) Create(&handle);
+     }
+     // std::vector.emplace() and push_back() may route through temporaries and call
+     // copy/move constructors along the way. If this is the case, we don't want
+     // the destructors of temporaries to call cudnnDestroy on the handle.
+     // We can achieve safety (for the narrow case of stashing within std::vectors)
+     // by making Handle movable but not copyable, and transferring handle ownership
+     // to the latest constructed object. This is not a substitute for full-blown
+     // reference counting, but reference counting may be overkill here.
+     // Another alternative is to wrap the saved Handles in unique_ptrs, i.e.,
+     // unordered_map<int, vector<unique_ptr<Handle>>> created_handles;
+     Handle(const Handle& rhs) = delete;
+     // Following https://stackoverflow.com/questions/3279543/what-is-the-copy-and-swap-idiom
+     Handle(Handle&& rhs) : Handle() { std::swap(handle, rhs.handle); }
+     // operator= takes its argument by value
+     Handle& operator=(Handle rhs) { std::swap(handle, rhs.handle); return *this; }
+     ~Handle() {
+       if(handle) Destroy(handle);
+     }
+   };
+
+   std::mutex mutex;
+
+   // Handles are lazily created as different threads request them,
+   // but are never destroyed until the end of the process.
+   // The maximum number of handles this process will create for each device is equal
+   // to the high-water mark of the number of concurrently active threads that request
+   // handles for that device.
+   // When threads terminate, they release their handles back into the pool for reuse.
+   // Otherwise, new handles would be created every time new threads were spawned,
+   // resulting in poor performance for Python modules that repeatedly or frequently
+   // spawned new sets of threads (like DataParallel, which creates a new set of threads
+   // for each forward pass).
+   //
+   // To prevent potential deadlocks, we explicitly choose not to cap the number
+   // of handles that are created per device.
+   // Example of danger: if we cap the max handles at 4, and 5 threads are sharing a device,
+   // only 4 can make forward progress at any time. Those 4 will not release their
+   // handles until they exit, so the fifth cannot make progress until then. This is
+   // not a problem...UNLESS all 5 threads attempt some sort of synchronization at an
+   // intermediate point (i.e., before any of them have exited). We have no way to anticipate
+   // or enforce that user threads will not attempt such intermediate synchronization.
+   // The only way to ensure safety is to avoid imposing a cap on the number of handles.
+   std::unordered_map<int, std::vector<Handle>> created_handles;
+   std::unordered_map<int, std::vector<Handle_t>> available_handles;
+
+   // PoolWindow lazily creates and caches the handles that a particular thread is using,
+   // so in the common case handle access doesn't incur either handle creation or a mutex lock.
+   class PoolWindow
+   {
+    public:
+     PoolWindow(std::shared_ptr<DeviceThreadHandlePool> parent): weak_parent(std::move(parent)) {}
+     ~PoolWindow(){ release(); }
+
+     Handle_t reserve(int device)
+     {
+       // If this thread already has a handle for this device, return it
+       if(my_handles.find(device) != my_handles.end())
+         return my_handles[device];
+
+       // Otherwise, either grab a handle from the pool if one is available,
+       // or, if not, create a new one.
+       auto parent = weak_parent.lock();
+       TORCH_CHECK(parent, "Cannot create handle during program termination");
+       std::lock_guard<std::mutex> guard(parent->mutex);
+
+       if(parent->available_handles[device].size() > 0)
+       {
+         my_handles[device] = parent->available_handles[device].back();
+         parent->available_handles[device].pop_back();
+       }
+       else
+       {
+         // In local testing, I do observe that emplace_back sometimes routes through temporaries
+         // that incur move-constructor and destructor calls. See comments in Handle above.
+         parent->created_handles[device].emplace_back(true /*create*/);
+         my_handles[device] = parent->created_handles[device].back().handle;
+       }
+
+       return my_handles[device];
+     }
+
+    private:
+     // Stores the per-device handles currently owned by this thread
+     std::unordered_map<int, Handle_t> my_handles;
+
+     std::weak_ptr<DeviceThreadHandlePool> weak_parent;
+
+     // Called by the destructor. Releases this thread's handles back into the pool.
+     void release() {
+       if(my_handles.size() > 0) {
+         auto parent = weak_parent.lock();
+         if (!parent) {
+           // If this thread exits after atexit handlers have completed, the
+           // cuda context itself may be invalid, so we must leak the handles.
+           return;
+         }
+
+         std::lock_guard<std::mutex> guard(parent->mutex);
+         for(auto d_h : my_handles)
+           parent->available_handles[d_h.first].push_back(d_h.second);
+       }
+     }
+   };
+
+   // Warning:
+   // If you want to change this function, be aware that it will be called
+   // by multiple threads and there is no mutex guarding its invocation, so
+   // make sure your implementation is thread-safe.
+   PoolWindow *newPoolWindow() {
+     // The returned pointer will be owned by a thread-local variable
+     // so that different threads do not share the same PoolWindow.
+     return new PoolWindow(this->shared_from_this());
+   }
+ };
+
+ }} // namespace at::cuda::<anonymous>
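
A hedged sketch of the reserve/recycle lifecycle the pool implements, with a FakeHandle type and createHandle/destroyHandle functions standing in for real library entry points such as cudnnCreate/cudnnDestroy (all names below are illustrative, not part of the header), so the pattern can be demonstrated without CUDA at all:

#include <cassert>
#include <unordered_map>
#include <vector>

// Stand-ins for a library handle and its create/destroy entry points.
struct FakeHandleRec { int id; };
using FakeHandle = FakeHandleRec*;

static int live_handles = 0;
void createHandle(FakeHandle* h)  { *h = new FakeHandleRec{live_handles++}; }
void destroyHandle(FakeHandle h)  { delete h; --live_handles; }

// Minimal pool with the same shape as DeviceThreadHandlePool: handles are
// created on demand per device and recycled rather than destroyed.
struct TinyPool {
  std::unordered_map<int, std::vector<FakeHandle>> available;
  FakeHandle reserve(int device) {
    auto& free_list = available[device];
    if (!free_list.empty()) {            // reuse before creating
      FakeHandle h = free_list.back();
      free_list.pop_back();
      return h;
    }
    FakeHandle h = nullptr;
    createHandle(&h);                    // pool grows only at the high-water mark
    return h;
  }
  void release(int device, FakeHandle h) { available[device].push_back(h); }
};

int main() {
  TinyPool pool;
  FakeHandle a = pool.reserve(/*device=*/0);  // creates handle #0
  pool.release(0, a);
  FakeHandle b = pool.reserve(0);             // reuses handle #0, no new creation
  assert(a == b && live_handles == 1);
  destroyHandle(b);
}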
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/xpu/XPUContext.h ADDED
@@ -0,0 +1,20 @@
+ #pragma once
+
+ #include <ATen/Context.h>
+ #include <c10/xpu/XPUFunctions.h>
+ #include <c10/xpu/XPUStream.h>
+
+ namespace at::xpu {
+
+ // XPU is available if we compiled with XPU and at least one device is detected.
+ inline bool is_available() {
+   return c10::xpu::device_count() > 0;
+ }
+
+ TORCH_XPU_API DeviceProp* getCurrentDeviceProperties();
+
+ TORCH_XPU_API DeviceProp* getDeviceProperties(DeviceIndex device);
+
+ TORCH_XPU_API int32_t getGlobalIdxFromDevice(DeviceIndex device);
+
+ } // namespace at::xpu
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/xpu/XPUDevice.h ADDED
@@ -0,0 +1,13 @@
+ #pragma once
+
+ #include <ATen/Context.h>
+ #include <c10/xpu/XPUFunctions.h>
+
+ namespace at::xpu {
+
+ inline Device getDeviceFromPtr(void* ptr) {
+   auto device = c10::xpu::get_device_idx_from_pointer(ptr);
+   return {c10::DeviceType::XPU, device};
+ }
+
+ } // namespace at::xpu
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/xpu/XPUEvent.h ADDED
@@ -0,0 +1,132 @@
+ #pragma once
+ #include <ATen/xpu/XPUContext.h>
+
+ #include <optional>
+
+ namespace at::xpu {
+
+ /*
+  * XPUEvent is a movable, but not copyable, wrapper around a SYCL event. An
+  * XPUEvent is constructed lazily when first recorded. It has a device, and
+  * this device is acquired from the first recording stream. Later streams
+  * that record the event must match the same device.
+  *
+  * Currently, XPUEvent does NOT support exporting an event to or from
+  * another process via inter-process communication (IPC). This means that
+  * sharing event handles between different processes is not available, which
+  * could impact some applications that rely on cross-process
+  * synchronization and communication.
+  */
+ struct TORCH_XPU_API XPUEvent {
+   // Constructors
+   XPUEvent(bool enable_timing = false) noexcept
+       : enable_timing_{enable_timing} {}
+
+   ~XPUEvent() = default;
+
+   XPUEvent(const XPUEvent&) = delete;
+   XPUEvent& operator=(const XPUEvent&) = delete;
+
+   XPUEvent(XPUEvent&& other) = default;
+   XPUEvent& operator=(XPUEvent&& other) = default;
+
+   operator sycl::event&() const {
+     return event();
+   }
+
+   std::optional<at::Device> device() const {
+     if (isCreated()) {
+       return at::Device(at::kXPU, device_index_);
+     } else {
+       return std::nullopt;
+     }
+   }
+
+   inline bool isCreated() const {
+     return (event_.get() != nullptr);
+   }
+
+   DeviceIndex device_index() const {
+     return device_index_;
+   }
+
+   sycl::event& event() const {
+     return *event_;
+   }
+
+   bool query() const {
+     using namespace sycl::info;
+     if (!isCreated()) {
+       return true;
+     }
+
+     return event().get_info<event::command_execution_status>() ==
+         event_command_status::complete;
+   }
+
+   void record() {
+     record(getCurrentXPUStream());
+   }
+
+   void recordOnce(const XPUStream& stream) {
+     if (!isCreated()) {
+       record(stream);
+     }
+   }
+
+   void record(const XPUStream& stream) {
+     if (!isCreated()) {
+       device_index_ = stream.device_index();
+     } else {
+       TORCH_CHECK(
+           device_index_ == stream.device_index(),
+           "Event device ",
+           device_index_,
+           " does not match recording stream's device ",
+           stream.device_index(),
+           ".");
+       event_.reset();
+     }
+     event_ = std::make_unique<sycl::event>(
+         stream.queue().ext_oneapi_submit_barrier());
+   }
+
+   void block(const XPUStream& stream) {
+     if (isCreated()) {
+       std::vector<sycl::event> event_list{event()};
+       // Make this stream wait until event_ is completed.
+       stream.queue().ext_oneapi_submit_barrier(event_list);
+     }
+   }
+
+   float elapsed_time(const XPUEvent& other) const {
+     TORCH_CHECK(
+         isCreated() && other.isCreated(),
+         "Both events must be recorded before calculating elapsed time.");
+     TORCH_CHECK(
+         query() && other.query(),
+         "Both events must be completed before calculating elapsed time.");
+     TORCH_CHECK(
+         enable_timing_ && other.enable_timing_,
+         "Both events must be created with argument 'enable_timing=True'.");
+     // TODO: provide the ability to time the execution of commands in a SYCL
+     // queue without enabling profiling on the entire queue
+     TORCH_CHECK_NOT_IMPLEMENTED(
+         false, "elapsed_time is not supported by XPUEvent.");
+   }
+
+   void synchronize() const {
+     if (isCreated()) {
+       event().wait_and_throw();
+     }
+   }
+
+  private:
+   bool enable_timing_ = false;
+   DeviceIndex device_index_ = -1;
+   // Only need to track the last event, as events in an in-order queue are
+   // executed sequentially.
+   std::unique_ptr<sycl::event> event_;
+ };
+
+ } // namespace at::xpu
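
A brief, hedged sketch of the record/block/synchronize lifecycle. It assumes an XPU-enabled libtorch build with SYCL available, and that c10::xpu::getStreamFromPool() is usable as in the CUDA analogue; stream names are illustrative:

#include <ATen/xpu/XPUEvent.h>
#include <c10/xpu/XPUStream.h>

// Ordering work between two streams with an XPUEvent: the event is created
// lazily on first record(), then a second stream waits on it.
void cross_stream_sync_example() {
  at::xpu::XPUEvent event;                        // not yet created
  c10::xpu::XPUStream producer = c10::xpu::getStreamFromPool();
  c10::xpu::XPUStream consumer = c10::xpu::getStreamFromPool();

  // ... enqueue work on `producer` ...
  event.record(producer);   // event now bound to producer's device
  event.block(consumer);    // consumer waits for the recorded barrier
  // ... enqueue dependent work on `consumer` ...

  event.synchronize();      // host-side wait, via sycl::event::wait_and_throw()
}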
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/xpu/XPUGeneratorImpl.h ADDED
@@ -0,0 +1,39 @@
+ #pragma once
+
+ #include <ATen/core/Generator.h>
+
+ namespace at {
+
+ struct TORCH_API XPUGeneratorImpl : public GeneratorImpl {
+   // Constructors
+   XPUGeneratorImpl(DeviceIndex device_index = -1);
+   ~XPUGeneratorImpl() override = default;
+
+   // XPUGeneratorImpl methods
+   std::shared_ptr<XPUGeneratorImpl> clone() const;
+   void set_current_seed(uint64_t seed) override;
+   void set_offset(uint64_t offset) override;
+   uint64_t get_offset() const override;
+   uint64_t current_seed() const override;
+   uint64_t seed() override;
+   void set_state(const c10::TensorImpl& new_state) override;
+   c10::intrusive_ptr<c10::TensorImpl> get_state() const override;
+   void set_philox_offset_per_thread(uint64_t offset);
+   uint64_t philox_offset_per_thread() const;
+   std::pair<uint64_t, uint64_t> philox_engine_inputs(uint64_t increment);
+   static c10::DeviceType device_type();
+
+  private:
+   XPUGeneratorImpl* clone_impl() const override;
+   uint64_t seed_ = default_rng_seed_val;
+   uint64_t philox_offset_per_thread_ = 0;
+ };
+
+ namespace xpu::detail {
+
+ TORCH_XPU_API const Generator& getDefaultXPUGenerator(DeviceIndex device = -1);
+
+ TORCH_XPU_API Generator createXPUGenerator(DeviceIndex device = -1);
+
+ } // namespace xpu::detail
+ } // namespace at
llmeval-env/lib/python3.10/site-packages/torch/include/ATen/xpu/detail/XPUHooks.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+
+ #include <ATen/detail/XPUHooksInterface.h>
+
+ namespace at::xpu::detail {
+
+ // The real implementation of XPUHooksInterface
+ struct XPUHooks : public at::XPUHooksInterface {
+   XPUHooks(at::XPUHooksArgs) {}
+   void initXPU() const override;
+   bool hasXPU() const override;
+   std::string showConfig() const override;
+   int32_t getGlobalIdxFromDevice(const at::Device& device) const override;
+   Generator getXPUGenerator(DeviceIndex device_index = -1) const override;
+   const Generator& getDefaultXPUGenerator(
+       DeviceIndex device_index = -1) const override;
+   Device getDeviceFromPtr(void* data) const override;
+   c10::DeviceIndex getNumGPUs() const override;
+   DeviceIndex current_device() const override;
+   void deviceSynchronize(DeviceIndex device_index) const override;
+ };
+
+ } // namespace at::xpu::detail
llmeval-env/lib/python3.10/site-packages/torch/include/c10/macros/Export.h ADDED
@@ -0,0 +1,160 @@
+ #ifndef C10_MACROS_EXPORT_H_
+ #define C10_MACROS_EXPORT_H_
+
+ /* Header file to define the common scaffolding for exported symbols.
+  *
+  * Symbol export is by itself a quite tricky situation to deal with, and if
+  * you are hitting this file, make sure you start with the background here:
+  * - Linux: https://gcc.gnu.org/wiki/Visibility
+  * - Windows:
+  *   https://docs.microsoft.com/en-us/cpp/cpp/dllexport-dllimport?view=vs-2017
+  *
+  * Do NOT include this file directly. Instead, use c10/macros/Macros.h
+  */
+
+ // You do not need to edit this part of the file unless you are changing the
+ // core pytorch export abstractions.
+ //
+ // This part defines the C10 core export and import macros. This is controlled
+ // by whether we are building shared libraries or not, which is determined
+ // during build time and codified in c10/core/cmake_macros.h.
+ // When the library is built as a shared lib, EXPORT and IMPORT will contain
+ // visibility attributes. If it is being built as a static lib, then EXPORT
+ // and IMPORT basically have no effect.
+
+ // As a rule of thumb, you should almost NEVER mix static and shared builds for
+ // libraries that depend on c10. AKA, if c10 is built as a static library, we
+ // recommend everything dependent on c10 be built statically. If c10 is built
+ // as a shared library, everything dependent on it should be built as shared.
+ // In the PyTorch project, all native libraries shall use the macro
+ // C10_BUILD_SHARED_LIB to check whether pytorch is building shared or static
+ // libraries.
+
+ // For build systems that do not directly depend on CMake and directly build
+ // from the source directory (such as Buck), one may not have a cmake_macros.h
+ // file at all. In this case, the build system is responsible for providing
+ // correct macro definitions corresponding to the cmake_macros.h.in file.
+ //
+ // In such scenarios, one should define the macro
+ //     C10_USING_CUSTOM_GENERATED_MACROS
+ // to inform this header that it does not need to include the cmake_macros.h
+ // file.
+
+ #ifndef C10_USING_CUSTOM_GENERATED_MACROS
+ #include <c10/macros/cmake_macros.h>
+ #endif // C10_USING_CUSTOM_GENERATED_MACROS
+
+ #ifdef _WIN32
+ #define C10_HIDDEN
+ #if defined(C10_BUILD_SHARED_LIBS)
+ #define C10_EXPORT __declspec(dllexport)
+ #define C10_IMPORT __declspec(dllimport)
+ #else
+ #define C10_EXPORT
+ #define C10_IMPORT
+ #endif
+ #else // _WIN32
+ #if defined(__GNUC__)
+ #define C10_EXPORT __attribute__((__visibility__("default")))
+ #define C10_HIDDEN __attribute__((__visibility__("hidden")))
+ #else // defined(__GNUC__)
+ #define C10_EXPORT
+ #define C10_HIDDEN
+ #endif // defined(__GNUC__)
+ #define C10_IMPORT C10_EXPORT
+ #endif // _WIN32
+
+ #ifdef NO_EXPORT
+ #undef C10_EXPORT
+ #define C10_EXPORT
+ #endif
+
+ // Definition of an adaptive XX_API macro that, depending on whether you are
+ // building the library itself or not, routes to XX_EXPORT or XX_IMPORT.
+ // Basically, you will need to do this for each shared library that you are
+ // building, and the instructions are as follows: assuming that you are
+ // building a library called libawesome.so. You should:
+ // (1) for your cmake target (usually done by "add_library(awesome, ...)"),
+ //     define a macro called AWESOME_BUILD_MAIN_LIB using
+ //     target_compile_options.
+ // (2) define the AWESOME_API macro similar to the one below.
+ // And in the source file of your awesome library, use AWESOME_API to
+ // annotate public symbols.
+
+ // Here, for the C10 library, we will define the macro C10_API for both import
+ // and export.
+
+ // This one is being used by libc10.so
+ #ifdef C10_BUILD_MAIN_LIB
+ #define C10_API C10_EXPORT
+ #else
+ #define C10_API C10_IMPORT
+ #endif
+
+ // This one is being used by libtorch.so
+ #ifdef CAFFE2_BUILD_MAIN_LIB
+ #define TORCH_API C10_EXPORT
+ #else
+ #define TORCH_API C10_IMPORT
+ #endif
+
+ // You may be wondering: Whose brilliant idea was it to split torch_cuda into
+ // two pieces with confusing names?
+ // Once upon a time, there _was_ only TORCH_CUDA_API. All was happy until we
+ // tried to compile PyTorch for CUDA 11.1, which ran into relocation marker
+ // issues when linking big binaries.
+ // (https://github.com/pytorch/pytorch/issues/39968) We had two choices:
+ // (1) Stop supporting so many GPU architectures
+ // (2) Do something else
+ // We chose #2 and decided to split the behemoth that was torch_cuda into two
+ // smaller libraries, one with most of the core kernel functions (torch_cuda_cu)
+ // and the other that had...well...everything else (torch_cuda_cpp). The idea was
+ // this: instead of linking our static libraries (like the hefty
+ // libcudnn_static.a) with another huge library, torch_cuda, and running into pesky
+ // relocation marker issues, we could link our static libraries to a smaller
+ // part of torch_cuda (torch_cuda_cpp) and avoid the issues.
+
+ // libtorch_cuda_cu.so
+ #ifdef TORCH_CUDA_CU_BUILD_MAIN_LIB
+ #define TORCH_CUDA_CU_API C10_EXPORT
+ #elif defined(BUILD_SPLIT_CUDA)
+ #define TORCH_CUDA_CU_API C10_IMPORT
+ #endif
+
+ // libtorch_cuda_cpp.so
+ #ifdef TORCH_CUDA_CPP_BUILD_MAIN_LIB
+ #define TORCH_CUDA_CPP_API C10_EXPORT
+ #elif defined(BUILD_SPLIT_CUDA)
+ #define TORCH_CUDA_CPP_API C10_IMPORT
+ #endif
+
+ // libtorch_cuda.so (where torch_cuda_cu and torch_cuda_cpp are a part of the
+ // same api)
+ #ifdef TORCH_CUDA_BUILD_MAIN_LIB
+ #define TORCH_CUDA_CPP_API C10_EXPORT
+ #define TORCH_CUDA_CU_API C10_EXPORT
+ #elif !defined(BUILD_SPLIT_CUDA)
+ #define TORCH_CUDA_CPP_API C10_IMPORT
+ #define TORCH_CUDA_CU_API C10_IMPORT
+ #endif
+
+ #if defined(TORCH_HIP_BUILD_MAIN_LIB)
+ #define TORCH_HIP_API C10_EXPORT
+ #else
+ #define TORCH_HIP_API C10_IMPORT
+ #endif
+
+ #if defined(TORCH_XPU_BUILD_MAIN_LIB)
+ #define TORCH_XPU_API C10_EXPORT
+ #else
+ #define TORCH_XPU_API C10_IMPORT
+ #endif
+
+ // Enums only need to be exported on windows for non-CUDA files
+ #if defined(_WIN32) && defined(__CUDACC__)
+ #define C10_API_ENUM C10_API
+ #else
+ #define C10_API_ENUM
+ #endif
+
+ #endif // C10_MACROS_EXPORT_H_
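
The comment above sketches the recipe for a hypothetical libawesome.so; spelled out, the adaptive macro looks like the sketch below. The AWESOME_* names are the illustrative placeholders from the comment, not real PyTorch macros:

// awesome/macros.h -- adaptive export macro for a hypothetical libawesome.so.
// The build system defines AWESOME_BUILD_MAIN_LIB only when compiling the
// library itself, so the same header exports symbols for the library build
// and imports them for every consumer.
#pragma once

#include <c10/macros/Export.h> // for C10_EXPORT / C10_IMPORT

#ifdef AWESOME_BUILD_MAIN_LIB
#define AWESOME_API C10_EXPORT
#else
#define AWESOME_API C10_IMPORT
#endif

// Usage in the library's public headers:
// AWESOME_API void do_something_awesome();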
llmeval-env/lib/python3.10/site-packages/torch/include/c10/macros/cmake_macros.h ADDED
@@ -0,0 +1,14 @@
+ #ifndef C10_MACROS_CMAKE_MACROS_H_
+ #define C10_MACROS_CMAKE_MACROS_H_
+
+ // Automatically generated header file for the C10 library.
+ // Do not include this file directly. Instead, include c10/macros/Macros.h.
+
+ #define C10_BUILD_SHARED_LIBS
+ /* #undef C10_USE_GLOG */
+ /* #undef C10_USE_GFLAGS */
+ /* #undef C10_USE_NUMA */
+ /* #undef C10_USE_MSVC_STATIC_RUNTIME */
+ /* #undef C10_USE_ROCM_KERNEL_ASSERT */
+
+ #endif // C10_MACROS_CMAKE_MACROS_H_
llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/AbortHandler.h ADDED
@@ -0,0 +1,81 @@
+ #include <c10/macros/Macros.h>
+ #include <c10/util/Backtrace.h>
+ #include <c10/util/env.h>
+ #include <cstdlib>
+ #include <exception>
+ #include <iostream>
+ #include <mutex>
+ #include <optional>
+
+ namespace c10 {
+ class AbortHandlerHelper {
+  public:
+   static AbortHandlerHelper& getInstance() {
+ #ifdef _WIN32
+     thread_local
+ #endif // _WIN32
+         static AbortHandlerHelper instance;
+     return instance;
+   }
+
+   void set(std::terminate_handler handler) {
+     std::lock_guard<std::mutex> lk(mutex);
+     if (!inited) {
+       prev = std::set_terminate(handler);
+       curr = std::get_terminate();
+       inited = true;
+     }
+   }
+
+   std::terminate_handler getPrev() const {
+     return prev;
+   }
+
+  private:
+   std::terminate_handler prev = nullptr;
+   std::terminate_handler curr = nullptr;
+   bool inited = false;
+   std::mutex mutex;
+   AbortHandlerHelper() = default;
+   ~AbortHandlerHelper() {
+     // Only restore the handler if we are the current one
+     if (inited && curr == std::get_terminate()) {
+       std::set_terminate(prev);
+     }
+   }
+
+  public:
+   AbortHandlerHelper(AbortHandlerHelper const&) = delete;
+   void operator=(AbortHandlerHelper const&) = delete;
+ };
+
+ namespace detail {
+ C10_ALWAYS_INLINE void terminate_handler() {
+   std::cout << "Unhandled exception caught in c10/util/AbortHandler.h" << '\n';
+   auto backtrace = get_backtrace();
+   std::cout << backtrace << '\n' << std::flush;
+   auto prev_handler = AbortHandlerHelper::getInstance().getPrev();
+   if (prev_handler) {
+     prev_handler();
+   } else {
+     std::abort();
+   }
+ }
+ } // namespace detail
+
+ C10_ALWAYS_INLINE void set_terminate_handler() {
+   bool use_custom_terminate = false;
+   // On Windows it is enabled by default based on
+   // https://github.com/pytorch/pytorch/pull/50320#issuecomment-763147062
+ #ifdef _WIN32
+   use_custom_terminate = true;
+ #endif // _WIN32
+   auto result = c10::utils::check_env("TORCH_CUSTOM_TERMINATE");
+   if (result != std::nullopt) {
+     use_custom_terminate = result.value();
+   }
+   if (use_custom_terminate) {
+     AbortHandlerHelper::getInstance().set(detail::terminate_handler);
+   }
+ }
+ } // namespace c10
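
A minimal sketch of wiring the handler into a program (assumes linking against c10; on non-Windows platforms the TORCH_CUSTOM_TERMINATE environment variable must be set for the handler to be installed, and the uncaught throw below is only there to trigger std::terminate):

#include <c10/util/AbortHandler.h>
#include <stdexcept>

int main() {
  // Install the backtrace-printing terminate handler. Honors the
  // TORCH_CUSTOM_TERMINATE environment variable (default-on for Windows).
  c10::set_terminate_handler();

  // Any exception that escapes now reaches the custom handler, which prints
  // a backtrace before aborting (or before chaining to the previous handler).
  throw std::runtime_error("uncaught on purpose");
}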
llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/ApproximateClock.h ADDED
@@ -0,0 +1,115 @@
+ // Copyright 2023-present Facebook. All Rights Reserved.
+
+ #pragma once
+
+ #include <c10/macros/Export.h>
+ #include <array>
+ #include <chrono>
+ #include <cstddef>
+ #include <cstdint>
+ #include <ctime>
+ #include <functional>
+ #include <type_traits>
+
+ #if defined(C10_IOS) && defined(C10_MOBILE)
+ #include <sys/time.h> // for gettimeofday()
+ #endif
+
+ #if defined(__i386__) || defined(__x86_64__) || defined(__amd64__)
+ #define C10_RDTSC
+ #if defined(_MSC_VER)
+ #include <intrin.h>
+ #elif defined(__CUDACC__) || defined(__HIPCC__)
+ #undef C10_RDTSC
+ #elif defined(__clang__)
+ // `__rdtsc` is available by default.
+ // NB: This has to come first, because Clang will also define `__GNUC__`
+ #elif defined(__GNUC__)
+ #include <x86intrin.h>
+ #else
+ #undef C10_RDTSC
+ #endif
+ #endif
+
+ namespace c10 {
+
+ using time_t = int64_t;
+ using steady_clock_t = std::conditional_t<
+     std::chrono::high_resolution_clock::is_steady,
+     std::chrono::high_resolution_clock,
+     std::chrono::steady_clock>;
+
+ inline time_t getTimeSinceEpoch() {
+   auto now = std::chrono::system_clock::now().time_since_epoch();
+   return std::chrono::duration_cast<std::chrono::nanoseconds>(now).count();
+ }
+
+ inline time_t getTime(bool allow_monotonic = false) {
+ #if defined(C10_IOS) && defined(C10_MOBILE)
+   // clock_gettime is only available on iOS 10.0 or newer. Unlike OS X, iOS
+   // can't rely on CLOCK_REALTIME, as it is defined no matter if clock_gettime
+   // is implemented or not
+   struct timeval now;
+   gettimeofday(&now, NULL);
+   return static_cast<time_t>(now.tv_sec) * 1000000000 +
+       static_cast<time_t>(now.tv_usec) * 1000;
+ #elif defined(_WIN32) || defined(__MACH__)
+   return std::chrono::duration_cast<std::chrono::nanoseconds>(
+              steady_clock_t::now().time_since_epoch())
+       .count();
+ #else
+   // clock_gettime is *much* faster than the std::chrono implementation on Linux
+   struct timespec t {};
+   auto mode = CLOCK_REALTIME;
+   if (allow_monotonic) {
+     mode = CLOCK_MONOTONIC;
+   }
+   clock_gettime(mode, &t);
+   return static_cast<time_t>(t.tv_sec) * 1000000000 +
+       static_cast<time_t>(t.tv_nsec);
+ #endif
+ }
+
+ // We often do not need to capture true wall times. If a fast mechanism such
+ // as TSC is available we can use that instead and convert back to epoch time
+ // during post processing. This greatly reduces the clock's contribution to
+ // profiling overhead.
+ // http://btorpey.github.io/blog/2014/02/18/clock-sources-in-linux/
+ // https://quick-bench.com/q/r8opkkGZSJMu9wM_XTbDouq-0Io
+ // TODO: We should use
+ // `https://github.com/google/benchmark/blob/main/src/cycleclock.h`
+ inline auto getApproximateTime() {
+ #if defined(C10_RDTSC)
+   return static_cast<uint64_t>(__rdtsc());
+ #else
+   return getTime();
+ #endif
+ }
+
+ using approx_time_t = decltype(getApproximateTime());
+ static_assert(
+     std::is_same_v<approx_time_t, int64_t> ||
+         std::is_same_v<approx_time_t, uint64_t>,
+     "Expected either int64_t (`getTime`) or uint64_t (some TSC reads).");
+
+ // Convert `getCount` results to nanoseconds since the unix epoch.
+ class C10_API ApproximateClockToUnixTimeConverter final {
+  public:
+   ApproximateClockToUnixTimeConverter();
+   std::function<time_t(approx_time_t)> makeConverter();
+
+   struct UnixAndApproximateTimePair {
+     time_t t_;
+     approx_time_t approx_t_;
+   };
+   static UnixAndApproximateTimePair measurePair();
+
+  private:
+   static constexpr size_t replicates = 1001;
+   using time_pairs = std::array<UnixAndApproximateTimePair, replicates>;
+   time_pairs measurePairs();
+
+   time_pairs start_times_;
+ };
+
+ } // namespace c10
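
A hedged sketch of the intended usage pattern: sample cheap TSC-style timestamps on the hot path, and convert them to epoch nanoseconds only during post-processing (assumes linking against c10; the loop body is illustrative):

#include <c10/util/ApproximateClock.h>
#include <vector>

int main() {
  // The converter samples wall-clock/approximate-clock pairs up front...
  c10::ApproximateClockToUnixTimeConverter converter;

  // ...so the hot path only pays for the cheap approximate read.
  std::vector<c10::approx_time_t> stamps;
  for (int i = 0; i < 1000; ++i) {
    stamps.push_back(c10::getApproximateTime());
  }

  // Post-processing: build the conversion function once, then map every
  // approximate stamp back to nanoseconds since the unix epoch.
  auto to_epoch_ns = converter.makeConverter();
  c10::time_t first_ns = to_epoch_ns(stamps.front());
  (void)first_ns;
}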
llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/BFloat16-math.h ADDED
@@ -0,0 +1,287 @@
+ #pragma once
+
+ #include <c10/util/BFloat16.h>
+ #include <c10/util/Half.h>
+
+ C10_CLANG_DIAGNOSTIC_PUSH()
+ #if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion")
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-float-conversion")
+ #endif
+
+ namespace std {
+
+ template <typename T>
+ struct is_reduced_floating_point
+     : std::integral_constant<
+           bool,
+           std::is_same_v<T, c10::Half> || std::is_same_v<T, c10::BFloat16>> {};
+
+ template <typename T>
+ constexpr bool is_reduced_floating_point_v =
+     is_reduced_floating_point<T>::value;
+
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T acos(T a) {
+   return std::acos(float(a));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T asin(T a) {
+   return std::asin(float(a));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T atan(T a) {
+   return std::atan(float(a));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T atanh(T a) {
+   return std::atanh(float(a));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T erf(T a) {
+   return std::erf(float(a));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T erfc(T a) {
+   return std::erfc(float(a));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T exp(T a) {
+   return std::exp(float(a));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T expm1(T a) {
+   return std::expm1(float(a));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T log(T a) {
+   return std::log(float(a));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T log10(T a) {
+   return std::log10(float(a));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T log1p(T a) {
+   return std::log1p(float(a));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T log2(T a) {
+   return std::log2(float(a));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T ceil(T a) {
+   return std::ceil(float(a));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T cos(T a) {
+   return std::cos(float(a));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T floor(T a) {
+   return std::floor(float(a));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T nearbyint(T a) {
+   return std::nearbyint(float(a));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T sin(T a) {
+   return std::sin(float(a));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T tan(T a) {
+   return std::tan(float(a));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T sinh(T a) {
+   return std::sinh(float(a));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T cosh(T a) {
+   return std::cosh(float(a));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T tanh(T a) {
+   return std::tanh(float(a));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T trunc(T a) {
+   return std::trunc(float(a));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T lgamma(T a) {
+   return std::lgamma(float(a));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T sqrt(T a) {
+   return std::sqrt(float(a));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T rsqrt(T a) {
+   return 1.0 / std::sqrt(float(a));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T abs(T a) {
+   return std::abs(float(a));
+ }
+ #if defined(_MSC_VER) && defined(__CUDACC__)
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T pow(T a, double b) {
+   return std::pow(float(a), float(b));
+ }
+ #else
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T pow(T a, double b) {
+   return std::pow(float(a), b);
+ }
+ #endif
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T pow(T a, T b) {
+   return std::pow(float(a), float(b));
+ }
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ inline T fmod(T a, T b) {
+   return std::fmod(float(a), float(b));
+ }
+
+ /*
+   The following function is inspired from the implementation in `musl`
+   Link to License: https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
+   ----------------------------------------------------------------------
+   Copyright © 2005-2020 Rich Felker, et al.
+
+   Permission is hereby granted, free of charge, to any person obtaining
+   a copy of this software and associated documentation files (the
+   "Software"), to deal in the Software without restriction, including
+   without limitation the rights to use, copy, modify, merge, publish,
+   distribute, sublicense, and/or sell copies of the Software, and to
+   permit persons to whom the Software is furnished to do so, subject to
+   the following conditions:
+
+   The above copyright notice and this permission notice shall be
+   included in all copies or substantial portions of the Software.
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+   IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+   CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+   TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+   SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+   ----------------------------------------------------------------------
+  */
+ template <
+     typename T,
+     typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+ C10_HOST_DEVICE inline T nextafter(T from, T to) {
+   // Reference:
+   // https://git.musl-libc.org/cgit/musl/tree/src/math/nextafter.c
+   using int_repr_t = uint16_t;
+   using float_t = T;
+   constexpr uint8_t bits = 16;
+   union {
+     float_t f;
+     int_repr_t i;
+   } ufrom = {from}, uto = {to};
+
+   // get a mask to get the sign bit i.e. MSB
+   int_repr_t sign_mask = int_repr_t{1} << (bits - 1);
+
+   // short-circuit: if either is NaN, return NaN
+   if (from != from || to != to) {
+     return from + to;
+   }
+
+   // short-circuit: if they are exactly the same.
+   if (ufrom.i == uto.i) {
+     return from;
+   }
+
+   // mask the sign-bit to zero i.e. positive
+   // equivalent to abs(x)
+   int_repr_t abs_from = ufrom.i & ~sign_mask;
+   int_repr_t abs_to = uto.i & ~sign_mask;
+   if (abs_from == 0) {
+     // if both are zero but with different sign,
+     // preserve the sign of `to`.
+     if (abs_to == 0) {
+       return to;
+     }
+     // smallest subnormal with sign of `to`.
+     ufrom.i = (uto.i & sign_mask) | int_repr_t{1};
+     return ufrom.f;
+   }
+
+   // if abs(from) > abs(to) or sign(from) != sign(to)
+   if (abs_from > abs_to || ((ufrom.i ^ uto.i) & sign_mask)) {
+     ufrom.i--;
+   } else {
+     ufrom.i++;
+   }
+
+   return ufrom.f;
+ }
+
+ } // namespace std
+
+ C10_CLANG_DIAGNOSTIC_POP()
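
The nextafter implementation above relies on the fact that, for IEEE-style formats, incrementing or decrementing the raw bit pattern of a finite nonzero value steps to the adjacent representable value. A standalone demonstration of that bit trick on ordinary 32-bit floats (using memcpy instead of the union, since that is the portable way to type-pun in standard C++):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

// Step a positive finite float to the next representable value by
// incrementing its raw bit pattern -- the same trick the 16-bit
// nextafter above uses.
float next_up(float x) {
  std::uint32_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  ++bits; // adjacent bit pattern == adjacent float (positive finite x)
  std::memcpy(&x, &bits, sizeof(bits));
  return x;
}

int main() {
  float x = 1.0f;
  // Matches the standard library's nextafter for this case.
  assert(next_up(x) == std::nextafterf(x, 2.0f));
}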
llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Backtrace.h ADDED
@@ -0,0 +1,17 @@
+ #ifndef C10_UTIL_BACKTRACE_H_
+ #define C10_UTIL_BACKTRACE_H_
+
+ #include <cstddef>
+ #include <string>
+ #include <typeinfo>
+
+ #include <c10/macros/Macros.h>
+
+ namespace c10 {
+ C10_API std::string get_backtrace(
+     size_t frames_to_skip = 0,
+     size_t maximum_number_of_frames = 64,
+     bool skip_python_frames = true);
+ } // namespace c10
+
+ #endif // C10_UTIL_BACKTRACE_H_
llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Bitset.h ADDED
@@ -0,0 +1,116 @@
+ #pragma once
+
+ #include <cstddef>
+ #if defined(_MSC_VER)
+ #include <intrin.h>
+ #endif
+
+ namespace c10::utils {
+
+ /**
+  * This is a simple bitset class with sizeof(long long int) bits.
+  * You can set bits, unset bits, query bits by index,
+  * and query for the first set bit.
+  * Before using this class, please also take a look at std::bitset,
+  * which has more functionality and is more generic. It is probably
+  * a better fit for your use case. The sole reason for c10::utils::bitset
+  * to exist is that std::bitset misses a find_first_set() method.
+  */
+ struct bitset final {
+  private:
+ #if defined(_MSC_VER)
+   // MSVC's _BitScanForward64 expects int64_t
+   using bitset_type = int64_t;
+ #else
+   // POSIX ffsll expects long long int
+   using bitset_type = long long int;
+ #endif
+  public:
+   static constexpr size_t NUM_BITS() {
+     return 8 * sizeof(bitset_type);
+   }
+
+   constexpr bitset() noexcept = default;
+   constexpr bitset(const bitset&) noexcept = default;
+   constexpr bitset(bitset&&) noexcept = default;
+   // There is an issue with gcc 5.3.0 when a defaulted function is declared
+   // constexpr; see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=68754.
+   bitset& operator=(const bitset&) noexcept = default;
+   bitset& operator=(bitset&&) noexcept = default;
+
+   constexpr void set(size_t index) noexcept {
+     bitset_ |= (static_cast<long long int>(1) << index);
+   }
+
+   constexpr void unset(size_t index) noexcept {
+     bitset_ &= ~(static_cast<long long int>(1) << index);
+   }
+
+   constexpr bool get(size_t index) const noexcept {
+     return bitset_ & (static_cast<long long int>(1) << index);
+   }
+
+   constexpr bool is_entirely_unset() const noexcept {
+     return 0 == bitset_;
+   }
+
+   // Call the given functor with the index of each bit that is set
+   template <class Func>
+   void for_each_set_bit(Func&& func) const {
+     bitset cur = *this;
+     size_t index = cur.find_first_set();
+     while (0 != index) {
+       // -1 because find_first_set() is one-indexed.
+       index -= 1;
+       func(index);
+       cur.unset(index);
+       index = cur.find_first_set();
+     }
+   }
+
+  private:
+   // Return the index of the first set bit. The returned index is one-indexed
+   // (i.e. if the very first bit is set, this function returns '1'), and a
+   // return of '0' means that there was no bit set.
+   size_t find_first_set() const {
+ #if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64))
+     unsigned long result;
+     bool has_bits_set = (0 != _BitScanForward64(&result, bitset_));
+     if (!has_bits_set) {
+       return 0;
+     }
+     return result + 1;
+ #elif defined(_MSC_VER) && defined(_M_IX86)
+     unsigned long result;
+     if (static_cast<uint32_t>(bitset_) != 0) {
+       bool has_bits_set =
+           (0 != _BitScanForward(&result, static_cast<uint32_t>(bitset_)));
+       if (!has_bits_set) {
+         return 0;
+       }
+       return result + 1;
+     } else {
+       bool has_bits_set =
+           (0 != _BitScanForward(&result, static_cast<uint32_t>(bitset_ >> 32)));
+       if (!has_bits_set) {
+         return 32;
+       }
+       return result + 33;
+     }
+ #else
+     return __builtin_ffsll(bitset_);
+ #endif
+   }
+
+   friend bool operator==(bitset lhs, bitset rhs) noexcept {
+     return lhs.bitset_ == rhs.bitset_;
+   }
+
+   bitset_type bitset_{0};
+ };
+
+ inline bool operator!=(bitset lhs, bitset rhs) noexcept {
+   return !(lhs == rhs);
+ }
+
+ } // namespace c10::utils
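
A standalone sketch of the same one-indexed find-first-set loop that for_each_set_bit uses, built directly on the GCC/Clang intrinsic the header falls back to (so it assumes a GCC-compatible compiler):

#include <cassert>
#include <vector>

// Visit each set bit's zero-based index, mirroring bitset::for_each_set_bit:
// __builtin_ffsll is one-indexed and returns 0 when no bit is set.
template <class Func>
void for_each_set_bit(long long int bits, Func func) {
  int index = __builtin_ffsll(bits);
  while (index != 0) {
    func(index - 1);                          // convert to zero-based
    bits &= ~(1LL << (index - 1));            // clear the visited bit
    index = __builtin_ffsll(bits);
  }
}

int main() {
  std::vector<int> seen;
  for_each_set_bit(0b101001, [&](int i) { seen.push_back(i); });
  assert((seen == std::vector<int>{0, 3, 5}));
}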
llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/C++17.h ADDED
@@ -0,0 +1,166 @@
1
+ #pragma once
2
+ #ifndef C10_UTIL_CPP17_H_
3
+ #define C10_UTIL_CPP17_H_
4
+
5
+ #include <c10/macros/Macros.h>
6
+ #include <functional>
7
+ #include <memory>
8
+ #include <type_traits>
9
+ #include <utility>
10
+
11
+ #if !defined(__clang__) && !defined(_MSC_VER) && defined(__GNUC__) && \
12
+ __GNUC__ < 9
13
+ #error \
14
+ "You're trying to build PyTorch with a too old version of GCC. We need GCC 9 or later."
15
+ #endif
16
+
17
+ #if defined(__clang__) && __clang_major__ < 9
18
+ #error \
19
+ "You're trying to build PyTorch with a too old version of Clang. We need Clang 9 or later."
20
+ #endif
21
+
22
+ #if (defined(_MSC_VER) && (!defined(_MSVC_LANG) || _MSVC_LANG < 201703L)) || \
23
+ (!defined(_MSC_VER) && __cplusplus < 201703L)
24
+ #error You need C++17 to compile PyTorch
25
+ #endif
26
+
27
+ #if defined(_WIN32) && (defined(min) || defined(max))
28
+ #error Macro clash with min and max -- define NOMINMAX when compiling your program on Windows
29
+ #endif
30
+
31
+ /*
32
+ * This header adds some polyfills with C++17 functionality
33
+ */
34
+
35
+ namespace c10 {
36
+
37
+ // in c++17 std::result_of has been superseded by std::invoke_result. Since
38
+ // c++20, std::result_of is removed.
39
+ template <typename F, typename... args>
40
+ #if defined(__cpp_lib_is_invocable) && __cpp_lib_is_invocable >= 201703L
41
+ using invoke_result = typename std::invoke_result<F, args...>;
42
+ #else
43
+ using invoke_result = typename std::result_of<F && (args && ...)>;
44
+ #endif
45
+
46
+ template <typename F, typename... args>
47
+ using invoke_result_t = typename invoke_result<F, args...>::type;
48
+
49
+ // std::is_pod is deprecated in C++20, std::is_standard_layout and
50
+ // std::is_trivial are introduced in C++11, std::conjunction has been introduced
51
+ // in C++17.
52
+ template <typename T>
53
+ using is_pod = std::conjunction<std::is_standard_layout<T>, std::is_trivial<T>>;
54
+
55
+ template <typename T>
56
+ constexpr bool is_pod_v = is_pod<T>::value;
57
+
58
+ namespace guts {
59
+
60
+ template <typename Base, typename Child, typename... Args>
61
+ std::enable_if_t<
62
+ !std::is_array_v<Base> && !std::is_array_v<Child> &&
63
+ std::is_base_of_v<Base, Child>,
64
+ std::unique_ptr<Base>>
65
+ make_unique_base(Args&&... args) {
66
+ return std::unique_ptr<Base>(new Child(std::forward<Args>(args)...));
67
+ }
68
+
69
+ template <class... B>
70
+ using conjunction = std::conjunction<B...>;
71
+ template <class... B>
72
+ using disjunction = std::disjunction<B...>;
73
+ template <bool B>
74
+ using bool_constant = std::bool_constant<B>;
75
+ template <class B>
76
+ using negation = std::negation<B>;
77
+
78
+ template <class T>
79
+ using void_t = std::void_t<T>;
80
+
81
+ #if defined(__cpp_lib_apply) && !defined(__CUDA_ARCH__) && !defined(__HIP__)
82
+
83
+ template <class F, class Tuple>
84
+ C10_HOST_DEVICE inline constexpr decltype(auto) apply(F&& f, Tuple&& t) {
85
+ return std::apply(std::forward<F>(f), std::forward<Tuple>(t));
86
+ }
87
+
88
+ #else
89
+
90
+ // Implementation from http://en.cppreference.com/w/cpp/utility/apply (but
91
+ // modified)
92
+ // TODO This is an incomplete implementation of std::apply, not working for
93
+ // member functions.
94
+ namespace detail {
95
+ template <class F, class Tuple, std::size_t... INDEX>
96
+ #if defined(_MSC_VER)
97
+ // MSVC has a problem with the decltype() return type, but it also doesn't need
98
+ // it
99
+ C10_HOST_DEVICE constexpr auto apply_impl(
100
+ F&& f,
101
+ Tuple&& t,
102
+ std::index_sequence<INDEX...>)
103
+ #else
104
+ // GCC/Clang need the decltype() return type
105
+ C10_HOST_DEVICE constexpr decltype(auto) apply_impl(
106
+ F&& f,
107
+ Tuple&& t,
108
+ std::index_sequence<INDEX...>)
109
+ #endif
110
+ {
111
+ return std::forward<F>(f)(std::get<INDEX>(std::forward<Tuple>(t))...);
112
+ }
113
+ } // namespace detail
114
+
115
+ template <class F, class Tuple>
116
+ C10_HOST_DEVICE constexpr decltype(auto) apply(F&& f, Tuple&& t) {
117
+ return detail::apply_impl(
118
+ std::forward<F>(f),
119
+ std::forward<Tuple>(t),
120
+ std::make_index_sequence<
121
+ std::tuple_size<std::remove_reference_t<Tuple>>::value>{});
122
+ }
123
+
124
+ #endif
125
+
126
+ template <typename Functor, typename... Args>
127
+ std::enable_if_t<
128
+ std::is_member_pointer_v<std::decay_t<Functor>>,
129
+ typename c10::invoke_result_t<Functor, Args...>>
130
+ invoke(Functor&& f, Args&&... args) {
131
+ return std::mem_fn(std::forward<Functor>(f))(std::forward<Args>(args)...);
132
+ }
133
+
134
+ template <typename Functor, typename... Args>
135
+ std::enable_if_t<
136
+ !std::is_member_pointer_v<std::decay_t<Functor>>,
137
+ typename c10::invoke_result_t<Functor, Args...>>
138
+ invoke(Functor&& f, Args&&... args) {
139
+ return std::forward<Functor>(f)(std::forward<Args>(args)...);
140
+ }
141
+
142
+ namespace detail {
143
+ struct _identity final {
144
+ template <class T>
145
+ using type_identity = T;
146
+
147
+ template <class T>
148
+ decltype(auto) operator()(T&& arg) {
149
+ return std::forward<T>(arg);
150
+ }
151
+ };
152
+
153
+ template <class Func, class Enable = void>
154
+ struct function_takes_identity_argument : std::false_type {};
155
+
156
+ template <class Func>
157
+ struct function_takes_identity_argument<
158
+ Func,
159
+ std::void_t<decltype(std::declval<Func>()(_identity()))>> : std::true_type {
160
+ };
161
+ } // namespace detail
162
+
163
+ } // namespace guts
164
+ } // namespace c10
165
+
166
+ #endif // C10_UTIL_CPP17_H_
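
A minimal usage sketch for the shims above (the `Base`/`Child` types and the demo function are illustrative, not part of the header; the include path assumes the header's usual location, c10/util/C++17.h):

#include <c10/util/C++17.h>
#include <memory>
#include <tuple>

struct Base { virtual ~Base() = default; };
struct Child final : Base { explicit Child(int v) : v_(v) {} int v_; };

void cpp17_shims_demo() {
  // is_pod_v is the conjunction of standard-layout and trivial:
  static_assert(c10::is_pod_v<int>, "int is standard-layout and trivial");

  // make_unique_base constructs a Child but returns ownership typed as Base:
  std::unique_ptr<Base> p = c10::guts::make_unique_base<Base, Child>(42);

  // guts::apply unpacks a tuple into a callable, like std::apply:
  int sum = c10::guts::apply([](int a, int b) { return a + b; },
                             std::make_tuple(1, 2));
  (void)p;
  (void)sum;
}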
llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/DeadlockDetection.h ADDED
@@ -0,0 +1,48 @@
1
+ #pragma once
2
+
3
+ #include <c10/macros/Export.h>
4
+ #include <c10/util/Exception.h>
5
+
6
+ /// This file provides some simple utilities for detecting common deadlocks in
7
+ /// PyTorch. For now, we focus exclusively on detecting Python GIL deadlocks,
8
+ /// as the GIL is a wide-ranging lock that is taken out in many situations.
9
+ /// The basic strategy is: before performing an operation that may block, you
10
+ /// can use TORCH_ASSERT_NO_GIL_WITHOUT_PYTHON_DEP() to assert that the GIL is
11
+ /// not held. This macro is to be used in contexts where no static dependency
12
+ /// on Python is available (we will handle indirecting a virtual call for you).
13
+ ///
14
+ /// If the GIL is held by a torchdeploy interpreter, we always report false.
15
+ /// If you are in a context where Python bindings are available, it's better
16
+ /// to directly assert on PyGILState_Check (as it avoids a vcall and also
17
+ /// works correctly with torchdeploy.)
18
+
19
+ #define TORCH_ASSERT_NO_GIL_WITHOUT_PYTHON_DEP() \
20
+ TORCH_INTERNAL_ASSERT( \
21
+ !c10::impl::check_python_gil(), \
22
+ "Holding GIL before a blocking operation! Please release the GIL before blocking, or see https://github.com/pytorch/pytorch/issues/56297 for how to release the GIL for destructors of objects")
23
+
24
+ namespace c10::impl {
25
+
26
+ C10_API bool check_python_gil();
27
+
28
+ struct C10_API PythonGILHooks {
29
+ virtual ~PythonGILHooks() = default;
30
+ // Returns true if we hold the GIL. If not linked against Python we
31
+ // always return false.
32
+ virtual bool check_python_gil() const = 0;
33
+ };
34
+
35
+ C10_API void SetPythonGILHooks(PythonGILHooks* factory);
36
+
37
+ // DO NOT call this registerer from a torch deploy instance! You will clobber
38
+ // other registrations
39
+ struct C10_API PythonGILHooksRegisterer {
40
+ explicit PythonGILHooksRegisterer(PythonGILHooks* factory) {
41
+ SetPythonGILHooks(factory);
42
+ }
43
+ ~PythonGILHooksRegisterer() {
44
+ SetPythonGILHooks(nullptr);
45
+ }
46
+ };
47
+
48
+ } // namespace c10::impl
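
A sketch of how a Python binding layer might install these hooks, and how Python-agnostic code uses the assertion macro. The hook implementation assumes CPython's <Python.h>; every name other than the c10 ones is illustrative:

#include <c10/util/DeadlockDetection.h>
#include <Python.h> // assumption: only available where Python bindings are built

namespace {

struct ConcretePythonGILHooks final : c10::impl::PythonGILHooks {
  bool check_python_gil() const override {
    // True iff the calling thread currently holds the GIL.
    return PyGILState_Check() != 0;
  }
};

// Installs the hooks during static initialization and unregisters them on
// exit. Per the warning above, never do this from a torch deploy interpreter.
ConcretePythonGILHooks python_gil_hooks;
c10::impl::PythonGILHooksRegisterer python_gil_hooks_registerer{&python_gil_hooks};

} // namespace

// In code with no static Python dependency, guard blocking operations:
void join_worker_thread_example() {
  TORCH_ASSERT_NO_GIL_WITHOUT_PYTHON_DEP(); // throws if the GIL is held here
  // ... blocking work (joining threads, waiting on condition variables) ...
}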
llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Exception.h ADDED
@@ -0,0 +1,711 @@
1
+ #ifndef C10_UTIL_EXCEPTION_H_
2
+ #define C10_UTIL_EXCEPTION_H_
3
+
4
+ #include <c10/macros/Export.h>
5
+ #include <c10/macros/Macros.h>
6
+ #include <c10/util/StringUtil.h>
7
+
8
+ #include <cstdint>
9
+ #include <exception>
10
+ #include <string>
11
+ #include <variant>
12
+ #include <vector>
13
+
14
+ #if defined(_MSC_VER) && _MSC_VER <= 1900
15
+ #define __func__ __FUNCTION__
16
+ #endif
17
+
18
+ namespace c10 {
19
+
20
+ /// The primary ATen error class.
21
+ /// Provides a complete error message with source location information via
22
+ /// `what()`, and a more concise message via `what_without_backtrace()`.
23
+ /// Don't throw this directly; use TORCH_CHECK/TORCH_INTERNAL_ASSERT instead.
24
+ ///
25
+ /// NB: c10::Error is handled specially by default in torch to suppress the
26
+ /// backtrace, see torch/csrc/Exceptions.h
27
+ class C10_API Error : public std::exception {
28
+ // The actual error message.
29
+ std::string msg_;
30
+
31
+ // Context for the message (in order of decreasing specificity). Context will
32
+ // be automatically formatted appropriately, so it is not necessary to add
33
+ // extra leading/trailing newlines to strings inside this vector
34
+ std::vector<std::string> context_;
35
+
36
+ // The C++ backtrace at the point when this exception was raised. This
37
+ // may be empty if there is no valid backtrace. (We don't use optional
38
+ // here to reduce the dependencies this file has.)
39
+ std::string backtrace_;
40
+
41
+ // These two are derived fields from msg_/context_ and backtrace_, but we need
42
+ // fields for the strings so that we can return a const char* (as the
43
+ // signature of std::exception requires). Currently, the invariant
44
+ // is that these fields are ALWAYS populated consistently with respect
45
+ // to msg_/context_ and backtrace_.
46
+ std::string what_;
47
+ std::string what_without_backtrace_;
48
+
49
+ // This is a little debugging trick: you can stash a relevant pointer
50
+ // in caller, and then when you catch the exception, you can compare
51
+ // against pointers you have on hand to get more information about
52
+ // where the exception came from. In Caffe2, this is used to figure
53
+ // out which operator raised an exception.
54
+ const void* caller_;
55
+
56
+ public:
57
+ // PyTorch-style Error constructor. NB: the implementation of this
58
+ // is actually in Logging.cpp
59
+ Error(SourceLocation source_location, std::string msg);
60
+
61
+ // Caffe2-style error message
62
+ Error(
63
+ const char* file,
64
+ const uint32_t line,
65
+ const char* condition,
66
+ const std::string& msg,
67
+ const std::string& backtrace,
68
+ const void* caller = nullptr);
69
+
70
+ // Base constructor
71
+ Error(std::string msg, std::string backtrace, const void* caller = nullptr);
72
+
73
+ // Add some new context to the message stack. The last added context
74
+ // will be formatted at the end of the context list upon printing.
75
+ // WARNING: This method is O(n) in the size of the stack, so don't go
76
+ // wild adding a ridiculous amount of context to error messages.
77
+ void add_context(std::string msg);
78
+
79
+ const std::string& msg() const {
80
+ return msg_;
81
+ }
82
+
83
+ const std::vector<std::string>& context() const {
84
+ return context_;
85
+ }
86
+
87
+ const std::string& backtrace() const {
88
+ return backtrace_;
89
+ }
90
+
91
+ /// Returns the complete error message, including the source location.
92
+ /// The returned pointer is invalidated if you call add_context() on
93
+ /// this object.
94
+ const char* what() const noexcept override {
95
+ return what_.c_str();
96
+ }
97
+
98
+ const void* caller() const noexcept {
99
+ return caller_;
100
+ }
101
+
102
+ /// Returns only the error message string, without source location.
103
+ /// The returned pointer is invalidated if you call add_context() on
104
+ /// this object.
105
+ virtual const char* what_without_backtrace() const noexcept {
106
+ return what_without_backtrace_.c_str();
107
+ }
108
+
109
+ private:
110
+ void refresh_what();
111
+ std::string compute_what(bool include_backtrace) const;
112
+ };
113
+
114
+ class C10_API Warning {
115
+ public:
116
+ class C10_API UserWarning {};
117
+ class C10_API DeprecationWarning {};
118
+
119
+ using warning_variant_t = std::variant<UserWarning, DeprecationWarning>;
120
+
121
+ Warning(
122
+ warning_variant_t type,
123
+ const SourceLocation& source_location,
124
+ std::string msg,
125
+ bool verbatim);
126
+
127
+ Warning(
128
+ warning_variant_t type,
129
+ SourceLocation source_location,
130
+ const char* msg,
131
+ bool verbatim);
132
+
133
+ Warning(
134
+ warning_variant_t type,
135
+ SourceLocation source_location,
136
+ ::c10::detail::CompileTimeEmptyString msg,
137
+ bool verbatim);
138
+
139
+ // Getters for members
140
+ warning_variant_t type() const;
141
+ const SourceLocation& source_location() const;
142
+ const std::string& msg() const;
143
+ bool verbatim() const;
144
+
145
+ private:
146
+ // The type of warning
147
+ warning_variant_t type_;
148
+
149
+ // Where the warning happened.
150
+ SourceLocation source_location_;
151
+
152
+ // The actual warning message.
153
+ std::string msg_;
154
+
155
+ // See note: [Verbatim Warnings]
156
+ bool verbatim_;
157
+ };
158
+
159
+ using UserWarning = Warning::UserWarning;
160
+ using DeprecationWarning = Warning::DeprecationWarning;
161
+
162
+ // Issue a warning with a given message. Dispatched to the current
163
+ // warning handler.
164
+ void C10_API warn(const Warning& warning);
165
+
166
+ class C10_API WarningHandler {
167
+ public:
168
+ virtual ~WarningHandler() = default;
169
+ /// The default warning handler. Prints the message to stderr.
170
+ virtual void process(const Warning& warning);
171
+ };
172
+
173
+ namespace WarningUtils {
174
+
175
+ // Note: [Verbatim Warnings]
176
+ // Warnings originating in C++ code can appear out-of-place to Python users:
177
+ // a user runs a line in Python, but the warning references a line in C++.
178
+ // Some parts of PyTorch, like the JIT, are cognizant of this mismatch
179
+ // and take care to map warnings back to the user's program, but most
180
+ // of PyTorch simply throws a context-free warning. To allow warning
181
+ // handlers to add context where appropriate, warn takes the
182
+ // "verbatim" flag. When this is false a warning handler might append
183
+ // the C++ warning to a Python warning message that relates the warning
184
+ // back to the user's program. Callers who have already accounted for
185
+ // context in their warnings should set verbatim to true so their warnings
186
+ // appear without modification.
187
+
188
+ /// Sets the global warning handler. This is not thread-safe, so it should
189
+ /// generally be called once during initialization or while holding the GIL
190
+ /// for programs that use Python.
191
+ /// User is responsible for keeping the WarningHandler alive until
192
+ /// it is not needed.
193
+ C10_API void set_warning_handler(WarningHandler* handler) noexcept(true);
194
+ /// Gets the global warning handler.
195
+ C10_API WarningHandler* get_warning_handler() noexcept(true);
196
+
197
+ class C10_API WarningHandlerGuard {
198
+ WarningHandler* prev_handler_;
199
+
200
+ public:
201
+ WarningHandlerGuard(WarningHandler* new_handler)
202
+ : prev_handler_(c10::WarningUtils::get_warning_handler()) {
203
+ c10::WarningUtils::set_warning_handler(new_handler);
204
+ }
205
+ ~WarningHandlerGuard() {
206
+ c10::WarningUtils::set_warning_handler(prev_handler_);
207
+ }
208
+ };
209
+
210
+ /// The TORCH_WARN_ONCE macro is difficult to test for. Use
211
+ /// setWarnAlways(true) to turn it into TORCH_WARN, which can be
212
+ /// tested for more easily.
213
+ C10_API void set_warnAlways(bool) noexcept(true);
214
+ C10_API bool get_warnAlways() noexcept(true);
215
+
216
+ // A RAII guard that sets warn_always (not thread-local) on
217
+ // construction, and sets it back to the original value upon destruction.
218
+ struct C10_API WarnAlways {
219
+ public:
220
+ explicit WarnAlways(bool setting = true);
221
+ ~WarnAlways();
222
+
223
+ private:
224
+ bool prev_setting;
225
+ };
226
+
227
+ } // namespace WarningUtils
228
+
229
+ // Like Error, but we always report the C++ backtrace, instead of only
230
+ // reporting it when TORCH_SHOW_CPP_STACKTRACES is enabled
231
+ class C10_API ErrorAlwaysShowCppStacktrace : public Error {
232
+ using Error::Error;
233
+ const char* what_without_backtrace() const noexcept override {
234
+ return what();
235
+ }
236
+ };
237
+
238
+ // Used in ATen for out-of-bound indices that can reasonably only be detected
239
+ // lazily inside a kernel (See: advanced indexing). These turn into
240
+ // IndexError when they cross to Python.
241
+ class C10_API IndexError : public Error {
242
+ using Error::Error;
243
+ };
244
+
245
+ // Used in ATen for invalid values. These turn into
246
+ // ValueError when they cross to Python.
247
+ class C10_API ValueError : public Error {
248
+ using Error::Error;
249
+ };
250
+
251
+ // Used in ATen for invalid types. These turn into
252
+ // TypeError when they cross to Python.
253
+ class C10_API TypeError : public Error {
254
+ using Error::Error;
255
+ };
256
+
257
+ // Used in ATen for functionality that is not implemented. These turn into
258
+ // NotImplementedError when they cross to Python.
259
+ class C10_API NotImplementedError : public Error {
260
+ using Error::Error;
261
+ };
262
+
263
+ // Used in ATen for non-finite indices. These turn into
264
+ // ExitException when they cross to Python.
265
+ class C10_API EnforceFiniteError : public Error {
266
+ using Error::Error;
267
+ };
268
+
269
+ // Used in Onnxifi backend lowering. These turn into
270
+ // ExitException when they cross to Python.
271
+ class C10_API OnnxfiBackendSystemError : public Error {
272
+ using Error::Error;
273
+ };
274
+
275
+ // Used for numerical errors from the linalg module. These
276
+ // turn into LinAlgError when they cross into Python.
277
+ class C10_API LinAlgError : public Error {
278
+ using Error::Error;
279
+ };
280
+
281
+ class C10_API OutOfMemoryError : public Error {
282
+ using Error::Error;
283
+ };
284
+
285
+ // Base error type for all distributed errors.
286
+ // These turn into DistError when they cross into Python.
287
+ class C10_API DistError : public Error {
288
+ using Error::Error;
289
+ };
290
+
291
+ // Used for collective communication library errors from the distributed module.
292
+ // These turn into DistBackendError when they cross into Python.
293
+ class C10_API DistBackendError : public DistError {
294
+ using DistError::DistError;
295
+ };
296
+
297
+ // Used for errors originating from the store.
298
+ // These turn into DistStoreError when they cross into Python.
299
+ class C10_API DistStoreError : public DistError {
300
+ using DistError::DistError;
301
+ };
302
+
303
+ // Used for errors originating from the TCP/IP stack and not from collective
304
+ // libraries. These turn into DistNetworkError when they cross into Python.
305
+ class C10_API DistNetworkError : public DistError {
306
+ using DistError::DistError;
307
+ };
308
+
309
+ // A utility function to return an exception std::string by prepending its
310
+ // exception type before its what() content
311
+ C10_API std::string GetExceptionString(const std::exception& e);
312
+
313
+ } // namespace c10
314
+
315
+ // Private helper macro for implementing TORCH_INTERNAL_ASSERT and TORCH_CHECK
316
+ //
317
+ // Note: In debug builds with MSVC, __LINE__ might be of type long (a.k.a.
318
+ // int32_t), which differs from the definition of `SourceLocation`, which
319
+ // requires unsigned int (a.k.a. uint32_t) and may cause a compile error with
320
+ // the message: error C2397: conversion from 'long' to 'uint32_t' requires a
321
+ // narrowing conversion. The static_cast here is used to make the build pass.
322
+ // If this is used inside a lambda, the __func__ macro expands to operator(),
323
+ // which isn't very useful but is hard to fix in a macro, so the warning is suppressed.
324
+ #define C10_THROW_ERROR(err_type, msg) \
325
+ throw ::c10::err_type( \
326
+ {__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, msg)
327
+
328
+ #define C10_BUILD_ERROR(err_type, msg) \
329
+ ::c10::err_type({__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, msg)
330
+
331
+ // Private helper macro to work around MSVC misexpansion of nested macro
332
+ // invocations involving __VA_ARGS__. See
333
+ // https://stackoverflow.com/questions/5134523/msvc-doesnt-expand-va-args-correctly
334
+ #define C10_EXPAND_MSVC_WORKAROUND(x) x
335
+
336
+ // On nvcc, C10_UNLIKELY thwarts missing return statement analysis. In cases
337
+ // where the unlikely expression may be a constant, use this macro to ensure
338
+ // return statement analysis keeps working (at the cost of not getting the
339
+ // likely/unlikely annotation on nvcc).
340
+ // https://github.com/pytorch/pytorch/issues/21418
341
+ //
342
+ // Currently, this is only used in the error reporting macros below. If you
343
+ // want to use it more generally, move me to Macros.h
344
+ //
345
+ // TODO: Brian Vaughan observed that we might be able to get this to work on
346
+ // nvcc by writing some sort of C++ overload that distinguishes constexpr inputs
347
+ // from non-constexpr. Since there isn't any evidence that losing C10_UNLIKELY
348
+ // in nvcc is causing us perf problems, this is not yet implemented, but this
349
+ // might be an interesting piece of C++ code for an intrepid bootcamper to
350
+ // write.
351
+ #if defined(__CUDACC__)
352
+ #define C10_UNLIKELY_OR_CONST(e) e
353
+ #else
354
+ #define C10_UNLIKELY_OR_CONST(e) C10_UNLIKELY(e)
355
+ #endif
356
+
357
+ // ----------------------------------------------------------------------------
358
+ // Error reporting macros
359
+ // ----------------------------------------------------------------------------
360
+
361
+ #ifdef STRIP_ERROR_MESSAGES
362
+ #define TORCH_RETHROW(e, ...) throw
363
+ #else
364
+ #define TORCH_RETHROW(e, ...) \
365
+ do { \
366
+ e.add_context(::c10::str(__VA_ARGS__)); \
367
+ throw; \
368
+ } while (false)
369
+ #endif
370
+
371
+ // A utility macro to provide assert()-like functionality; that is, enforcement
372
+ // of internal invariants in code. It supports an arbitrary number of extra
373
+ // arguments (evaluated only on failure), which will be printed in the assert
374
+ // failure message using operator<< (this is handy for printing variables
375
+ // that may help with debugging).
376
+ //
377
+ // Usage:
378
+ // TORCH_INTERNAL_ASSERT(should_be_true);
379
+ // TORCH_INTERNAL_ASSERT(x == 0, "x = ", x);
380
+ //
381
+ // Assuming no bugs in PyTorch, the conditions tested by this macro should
382
+ // always be true; e.g., it should be possible to disable all of these
383
+ // conditions without changing observable user behavior. If you would like to
384
+ // do error reporting for user input, please use TORCH_CHECK instead.
385
+ //
386
+ // NOTE: It is SAFE to use this macro in production code; on failure, this
387
+ // simply raises an exception, it does NOT unceremoniously quit the process
388
+ // (unlike assert()).
389
+ //
390
+ #ifdef STRIP_ERROR_MESSAGES
391
+ #define TORCH_INTERNAL_ASSERT(cond, ...) \
392
+ if (C10_UNLIKELY_OR_CONST(!(cond))) { \
393
+ ::c10::detail::torchCheckFail( \
394
+ __func__, \
395
+ __FILE__, \
396
+ static_cast<uint32_t>(__LINE__), \
397
+ #cond " INTERNAL ASSERT FAILED at " C10_STRINGIZE(__FILE__)); \
398
+ }
399
+ #else
400
+ // It would be nice if we could build a combined string literal out of
401
+ // the TORCH_INTERNAL_ASSERT prefix and a user-provided string literal
402
+ // as the first argument, but there doesn't seem to be any good way to
403
+ // do that while still supporting having a first argument that isn't a
404
+ // string literal.
405
+ #define TORCH_INTERNAL_ASSERT(cond, ...) \
406
+ if (C10_UNLIKELY_OR_CONST(!(cond))) { \
407
+ ::c10::detail::torchInternalAssertFail( \
408
+ __func__, \
409
+ __FILE__, \
410
+ static_cast<uint32_t>(__LINE__), \
411
+ #cond \
412
+ " INTERNAL ASSERT FAILED at " C10_STRINGIZE(__FILE__) ":" C10_STRINGIZE( \
413
+ __LINE__) ", please report a bug to PyTorch. ", \
414
+ c10::str(__VA_ARGS__)); \
415
+ }
416
+ #endif
417
+
418
+ // A utility macro to make it easier to test for error conditions from user
419
+ // input. Like TORCH_INTERNAL_ASSERT, it supports an arbitrary number of extra
420
+ // arguments (evaluated only on failure), which will be printed in the error
421
+ // message using operator<< (e.g., you can pass any object which has
422
+ // operator<< defined. Most objects in PyTorch have these definitions!)
423
+ //
424
+ // Usage:
425
+ // TORCH_CHECK(should_be_true); // A default error message will be provided
426
+ // // in this case; but we recommend writing an
427
+ // // explicit error message, as it is more
428
+ // // user friendly.
429
+ // TORCH_CHECK(x == 0, "Expected x to be 0, but got ", x);
430
+ //
431
+ // On failure, this macro will raise an exception. If this exception propagates
432
+ // to Python, it will convert into a Python RuntimeError.
433
+ //
434
+ // NOTE: It is SAFE to use this macro in production code; on failure, this
435
+ // simply raises an exception, it does NOT unceremoniously quit the process
436
+ // (unlike CHECK() from glog.)
437
+ //
438
+ #define TORCH_CHECK_WITH(error_t, cond, ...) \
439
+ TORCH_CHECK_WITH_MSG(error_t, cond, "", __VA_ARGS__)
440
+
441
+ #ifdef STRIP_ERROR_MESSAGES
442
+ #define TORCH_CHECK_MSG(cond, type, ...) \
443
+ (#cond #type " CHECK FAILED at " C10_STRINGIZE(__FILE__))
444
+ #define TORCH_CHECK_WITH_MSG(error_t, cond, type, ...) \
445
+ if (C10_UNLIKELY_OR_CONST(!(cond))) { \
446
+ C10_THROW_ERROR(Error, TORCH_CHECK_MSG(cond, type, __VA_ARGS__)); \
447
+ }
448
+ #else
449
+
450
+ namespace c10::detail {
451
+ template <typename... Args>
452
+ decltype(auto) torchCheckMsgImpl(const char* /*msg*/, const Args&... args) {
453
+ return ::c10::str(args...);
454
+ }
455
+ inline C10_API const char* torchCheckMsgImpl(const char* msg) {
456
+ return msg;
457
+ }
458
+ // If there is just 1 user-provided C-string argument, use it.
459
+ inline C10_API const char* torchCheckMsgImpl(
460
+ const char* /*msg*/,
461
+ const char* args) {
462
+ return args;
463
+ }
464
+ } // namespace c10::detail
465
+
466
+ #define TORCH_CHECK_MSG(cond, type, ...) \
467
+ (::c10::detail::torchCheckMsgImpl( \
468
+ "Expected " #cond \
469
+ " to be true, but got false. " \
470
+ "(Could this error message be improved? If so, " \
471
+ "please report an enhancement request to PyTorch.)", \
472
+ ##__VA_ARGS__))
473
+ #define TORCH_CHECK_WITH_MSG(error_t, cond, type, ...) \
474
+ if (C10_UNLIKELY_OR_CONST(!(cond))) { \
475
+ C10_THROW_ERROR(error_t, TORCH_CHECK_MSG(cond, type, __VA_ARGS__)); \
476
+ }
477
+ #endif
478
+
479
+ namespace c10::detail {
480
+
481
+ [[noreturn]] C10_API void torchCheckFail(
482
+ const char* func,
483
+ const char* file,
484
+ uint32_t line,
485
+ const std::string& msg);
486
+ [[noreturn]] C10_API void torchCheckFail(
487
+ const char* func,
488
+ const char* file,
489
+ uint32_t line,
490
+ const char* msg);
491
+
492
+ // The c10::str() call that creates userMsg can have 1 of 3 return
493
+ // types depending on the number and types of arguments passed to
494
+ // TORCH_INTERNAL_ASSERT. 0 arguments will get a
495
+ // CompileTimeEmptyString, 1 const char * will be passed straight
496
+ // through, and anything else will get converted to std::string.
497
+ [[noreturn]] C10_API void torchInternalAssertFail(
498
+ const char* func,
499
+ const char* file,
500
+ uint32_t line,
501
+ const char* condMsg,
502
+ const char* userMsg);
503
+ [[noreturn]] inline C10_API void torchInternalAssertFail(
504
+ const char* func,
505
+ const char* file,
506
+ uint32_t line,
507
+ const char* condMsg,
508
+ ::c10::detail::CompileTimeEmptyString /*userMsg*/) {
509
+ torchCheckFail(func, file, line, condMsg);
510
+ }
511
+ [[noreturn]] C10_API void torchInternalAssertFail(
512
+ const char* func,
513
+ const char* file,
514
+ uint32_t line,
515
+ const char* condMsg,
516
+ const std::string& userMsg);
517
+
518
+ } // namespace c10::detail
519
+
520
+ #ifdef STRIP_ERROR_MESSAGES
521
+ #define TORCH_CHECK(cond, ...) \
522
+ if (C10_UNLIKELY_OR_CONST(!(cond))) { \
523
+ ::c10::detail::torchCheckFail( \
524
+ __func__, \
525
+ __FILE__, \
526
+ static_cast<uint32_t>(__LINE__), \
527
+ TORCH_CHECK_MSG(cond, "", __VA_ARGS__)); \
528
+ }
529
+ #else
530
+ #define TORCH_CHECK(cond, ...) \
531
+ if (C10_UNLIKELY_OR_CONST(!(cond))) { \
532
+ ::c10::detail::torchCheckFail( \
533
+ __func__, \
534
+ __FILE__, \
535
+ static_cast<uint32_t>(__LINE__), \
536
+ TORCH_CHECK_MSG(cond, "", ##__VA_ARGS__)); \
537
+ }
538
+ #endif
539
+
540
+ // A utility macro that does what `TORCH_CHECK` does when compiled in host
541
+ // code and does nothing otherwise. Intended for code shared between host and
542
+ // device code, as an alternative to `TORCH_CHECK`.
543
+ #if defined(__CUDACC__) || defined(__HIPCC__)
544
+ #define TORCH_CHECK_IF_NOT_ON_CUDA(cond, ...)
545
+ #else
546
+ #define TORCH_CHECK_IF_NOT_ON_CUDA(cond, ...) TORCH_CHECK(cond, ##__VA_ARGS__)
547
+ #endif
548
+
549
+ // Debug only version of TORCH_INTERNAL_ASSERT. This macro only checks in debug
550
+ // build, and does nothing in release build. It is appropriate to use
551
+ // in situations where you want to add an assert to a hotpath, but it is
552
+ // too expensive to run this assert on production builds.
553
+ #ifdef NDEBUG
554
+ // Optimized version - generates no code.
555
+ #define TORCH_INTERNAL_ASSERT_DEBUG_ONLY(...) \
556
+ while (false) \
557
+ C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(__VA_ARGS__))
558
+ #else
559
+ #define TORCH_INTERNAL_ASSERT_DEBUG_ONLY(...) \
560
+ C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(__VA_ARGS__))
561
+ #endif
562
+
563
+ // TODO: We're going to get a lot of similar looking string literals
564
+ // this way; check if this actually affects binary size.
565
+
566
+ // Like TORCH_CHECK, but raises LinAlgError instead of Error.
567
+ #define TORCH_CHECK_LINALG(cond, ...) \
568
+ TORCH_CHECK_WITH_MSG(LinAlgError, cond, "LINALG", __VA_ARGS__)
569
+
570
+ // Like TORCH_CHECK, but raises IndexErrors instead of Errors.
571
+ #define TORCH_CHECK_INDEX(cond, ...) \
572
+ TORCH_CHECK_WITH_MSG(IndexError, cond, "INDEX", __VA_ARGS__)
573
+
574
+ // Like TORCH_CHECK, but raises ValueErrors instead of Errors.
575
+ #define TORCH_CHECK_VALUE(cond, ...) \
576
+ TORCH_CHECK_WITH_MSG(ValueError, cond, "VALUE", __VA_ARGS__)
577
+
578
+ // Like TORCH_CHECK, but raises TypeErrors instead of Errors.
579
+ #define TORCH_CHECK_TYPE(cond, ...) \
580
+ TORCH_CHECK_WITH_MSG(TypeError, cond, "TYPE", __VA_ARGS__)
581
+
582
+ // Like TORCH_CHECK, but raises NotImplementedErrors instead of Errors.
583
+ #define TORCH_CHECK_NOT_IMPLEMENTED(cond, ...) \
584
+ TORCH_CHECK_WITH_MSG(NotImplementedError, cond, "TYPE", __VA_ARGS__)
585
+
586
+ #define TORCH_CHECK_ALWAYS_SHOW_CPP_STACKTRACE(cond, ...) \
587
+ TORCH_CHECK_WITH_MSG( \
588
+ ErrorAlwaysShowCppStacktrace, cond, "TYPE", ##__VA_ARGS__)
589
+
590
+ #ifdef STRIP_ERROR_MESSAGES
591
+ #define WARNING_MESSAGE_STRING(...) \
592
+ ::c10::detail::CompileTimeEmptyString {}
593
+ #else
594
+ #define WARNING_MESSAGE_STRING(...) ::c10::str(__VA_ARGS__)
595
+ #endif
596
+
597
+ // Report a warning to the user. Accepts an arbitrary number of extra
598
+ // arguments which are concatenated into the warning message using operator<<
599
+ //
600
+ #ifdef DISABLE_WARN
601
+ #define _TORCH_WARN_WITH(...) ((void)0);
602
+ #else
603
+ #define _TORCH_WARN_WITH(warning_t, ...) \
604
+ ::c10::warn(::c10::Warning( \
605
+ warning_t(), \
606
+ {__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, \
607
+ WARNING_MESSAGE_STRING(__VA_ARGS__), \
608
+ false));
609
+ #endif
610
+
611
+ #define TORCH_WARN(...) _TORCH_WARN_WITH(::c10::UserWarning, __VA_ARGS__);
612
+
613
+ #define TORCH_WARN_DEPRECATION(...) \
614
+ _TORCH_WARN_WITH(::c10::DeprecationWarning, __VA_ARGS__);
615
+
616
+ // Report a warning to the user only once. Accepts an arbitrary number of extra
617
+ // arguments which are concatenated into the warning message using operator<<
618
+ //
619
+ #define _TORCH_WARN_ONCE(...) \
620
+ C10_UNUSED static const auto C10_ANONYMOUS_VARIABLE(torch_warn_once_) = \
621
+ [&] { \
622
+ TORCH_WARN(__VA_ARGS__); \
623
+ return true; \
624
+ }()
625
+
626
+ #ifdef DISABLE_WARN
627
+ #define TORCH_WARN_ONCE(...) ((void)0);
628
+ #else
629
+ #define TORCH_WARN_ONCE(...) \
630
+ if (::c10::WarningUtils::get_warnAlways()) { \
631
+ TORCH_WARN(__VA_ARGS__); \
632
+ } else { \
633
+ _TORCH_WARN_ONCE(__VA_ARGS__); \
634
+ }
635
+ #endif
636
+
637
+ // Report an error with a specific argument
638
+ // NOTE: using the argument name in TORCH_CHECK's message is preferred
639
+ #define TORCH_CHECK_ARG(cond, argN, ...) \
640
+ TORCH_CHECK(cond, "invalid argument ", argN, ": ", __VA_ARGS__)
641
+
642
+ // ----------------------------------------------------------------------------
643
+ // Deprecated macros
644
+ // ----------------------------------------------------------------------------
645
+
646
+ namespace c10::detail {
647
+
648
+ /*
649
+ // Deprecation disabled until we fix sites in our codebase
650
+ C10_DEPRECATED_MESSAGE("AT_ERROR(msg) is deprecated, use TORCH_CHECK(false, msg)
651
+ instead.")
652
+ */
653
+ inline void deprecated_AT_ERROR() {}
654
+
655
+ /*
656
+ // Deprecation disabled until we fix sites in our codebase
657
+ C10_DEPRECATED_MESSAGE("AT_ASSERT is deprecated, if you mean to indicate an
658
+ internal invariant failure, use " \
659
+ "TORCH_INTERNAL_ASSERT instead; if you mean to do user
660
+ error checking, use " \ "TORCH_CHECK. See
661
+ https://github.com/pytorch/pytorch/issues/20287 for more details.")
662
+ */
663
+ inline void deprecated_AT_ASSERT() {}
664
+
665
+ /*
666
+ // Deprecation disabled until we fix sites in our codebase
667
+ C10_DEPRECATED_MESSAGE("AT_ASSERTM is deprecated, if you mean to indicate an
668
+ internal invariant failure, use " \
669
+ "TORCH_INTERNAL_ASSERT instead; if you mean to do user
670
+ error checking, use " \ "TORCH_CHECK. See
671
+ https://github.com/pytorch/pytorch/issues/20287 for more details.")
672
+ */
673
+ inline void deprecated_AT_ASSERTM() {}
674
+
675
+ } // namespace c10::detail
676
+
677
+ // Deprecated alias; this alias was deprecated because people kept mistakenly
678
+ // using it for user error checking. Use TORCH_INTERNAL_ASSERT or TORCH_CHECK
679
+ // instead. See https://github.com/pytorch/pytorch/issues/20287 for more
680
+ // details.
681
+ #define AT_ASSERT(...) \
682
+ do { \
683
+ ::c10::detail::deprecated_AT_ASSERT(); \
684
+ C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(__VA_ARGS__)); \
685
+ } while (false)
686
+
687
+ // Deprecated alias, like AT_ASSERT. The new TORCH_INTERNAL_ASSERT macro
688
+ // supports both 0-ary and variadic calls, so having a separate
689
+ // message-accepting macro is not necessary.
690
+ //
691
+ // NB: we MUST include cond explicitly here, as MSVC will miscompile the macro
692
+ // expansion, shunting all of __VA_ARGS__ to cond. An alternate workaround
693
+ // can be seen at
694
+ // https://stackoverflow.com/questions/5134523/msvc-doesnt-expand-va-args-correctly
695
+ #define AT_ASSERTM(cond, ...) \
696
+ do { \
697
+ ::c10::detail::deprecated_AT_ASSERTM(); \
698
+ C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(cond, __VA_ARGS__)); \
699
+ } while (false)
700
+
701
+ // Deprecated alias; this alias was deprecated because it represents extra API
702
+ // surface that makes it hard for people to understand what macro to use.
703
+ // Use TORCH_CHECK(false, ...) or TORCH_INTERNAL_ASSERT(false, ...) to
704
+ // unconditionally fail at a line of code.
705
+ #define AT_ERROR(...) \
706
+ do { \
707
+ ::c10::detail::deprecated_AT_ERROR(); \
708
+ C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
709
+ } while (false)
710
+
711
+ #endif // C10_UTIL_EXCEPTION_H_
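
A short usage sketch for the main macros above (the function bodies and names are illustrative):

#include <c10/util/Exception.h>
#include <cstdint>

int64_t checked_index(int64_t i, int64_t size) {
  // User-input validation: raises c10::IndexError (Python IndexError).
  TORCH_CHECK_INDEX(0 <= i && i < size,
                    "index ", i, " is out of bounds for size ", size);
  return i;
}

void internal_invariants(int x) {
  // Internal invariant: should never fire absent a PyTorch bug.
  TORCH_INTERNAL_ASSERT(x >= 0, "x = ", x);
  // Emitted at most once per process (unless set_warnAlways(true) is in effect).
  TORCH_WARN_ONCE("this warning appears once");
}

void rethrow_with_context() {
  try {
    TORCH_CHECK(false, "original failure");
  } catch (c10::Error& e) {
    // Appends context to the message stack, then rethrows the same exception.
    TORCH_RETHROW(e, "while running rethrow_with_context()");
  }
}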
llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/ExclusivelyOwnedTensorTraits.h ADDED
@@ -0,0 +1,75 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/TensorImpl.h>
4
+ #include <c10/core/UndefinedTensorImpl.h>
5
+
6
+ #include <utility>
7
+
8
+ namespace c10 {
9
+ // Shared ExclusivelyOwnedTraits implementation between caffe2::Tensor and
10
+ // at::TensorBase.
11
+ template <typename TensorType>
12
+ struct ExclusivelyOwnedTensorTraits {
13
+ using repr_type = TensorType;
14
+ using pointer_type = TensorType*;
15
+ using const_pointer_type = const TensorType*;
16
+
17
+ static repr_type nullRepr() {
18
+ return TensorType();
19
+ }
20
+
21
+ template <class... Args>
22
+ static repr_type createInPlace(Args&&... args) {
23
+ return TensorType(std::forward<Args>(args)...);
24
+ }
25
+
26
+ static repr_type moveToRepr(TensorType&& x) {
27
+ return std::move(x);
28
+ }
29
+
30
+ static void destroyOwned(TensorType& x) {
31
+ TensorImpl* const toDestroy = x.unsafeReleaseTensorImpl();
32
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
33
+ toDestroy != nullptr, "Tensor somehow got null TensorImpl?");
34
+ // May be 0 because UndefinedTensorImpl doesn't get its refcount
35
+ // incremented.
36
+ const bool isUndefined = toDestroy == UndefinedTensorImpl::singleton();
37
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
38
+ toDestroy->refcount_ == 1 || (toDestroy->refcount_ == 0 && isUndefined),
39
+ "ExclusivelyOwned<Tensor> destroyed with isUndefined ",
40
+ isUndefined,
41
+ " and refcount ",
42
+ toDestroy->refcount_,
43
+ ", expected 1 or, if isUndefined, 0!");
44
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
45
+ toDestroy->weakcount_ == 1 ||
46
+ (toDestroy->weakcount_ == 0 &&
47
+ toDestroy == UndefinedTensorImpl::singleton()),
48
+ "ExclusivelyOwned<Tensor> destroyed with isUndefined ",
49
+ isUndefined,
50
+ " and weakcount ",
51
+ toDestroy->weakcount_,
52
+ ", expected 1 or, if isUndefined, 0!");
53
+ if (!isUndefined) {
54
+ #ifndef NDEBUG
55
+ // Needed to pass the debug assertions in ~intrusive_ptr_target.
56
+ toDestroy->refcount_ = 0;
57
+ toDestroy->weakcount_ = 0;
58
+ #endif
59
+ delete toDestroy;
60
+ }
61
+ }
62
+
63
+ static TensorType take(TensorType& x) {
64
+ return std::move(x);
65
+ }
66
+
67
+ static pointer_type getImpl(repr_type& x) {
68
+ return &x;
69
+ }
70
+
71
+ static const_pointer_type getImpl(const repr_type& x) {
72
+ return &x;
73
+ }
74
+ };
75
+ } // namespace c10
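
These traits are consumed indirectly: they are meant to be plugged into the companion c10::ExclusivelyOwned<T> wrapper (c10/util/ExclusivelyOwned.h) via a specialization of its traits template, presumably along these lines (a hedged sketch; the at::Tensor specialization site is an assumption, not shown in this file):

// Hedged sketch; the real specialization lives next to each tensor type:
namespace c10 {
template <>
struct ExclusivelyOwnedTraits<at::Tensor>
    : public c10::ExclusivelyOwnedTensorTraits<at::Tensor> {};
} // namespace c10

With that in place, ExclusivelyOwned<at::Tensor> can skip the usual reference-count decrement on destruction: destroyOwned() above asserts the refcount is already 1 (or 0 for the undefined singleton) and deletes the TensorImpl directly.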
llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2.h ADDED
@@ -0,0 +1,143 @@
1
+ #pragma once
2
+
3
+ /// Defines the Float8_e5m2 type (8-bit floating-point) including conversions
4
+ /// to standard C types and basic arithmetic operations. Note that arithmetic
5
+ /// operations are implemented by converting to floating point and
6
+ /// performing the operation in float32.
7
+ /// Binary configuration:
8
+ /// s eeeee mm
9
+ /// 1 sign bit
10
+ /// 5 exponent bits
11
+ /// 2 mantissa bits
12
+ /// bias = 15
13
+ ///
14
+ /// Implementation based on the paper https://arxiv.org/pdf/2209.05433.pdf
15
+ /// and inspired by Half implementation from pytorch/c10/util/Half.h
16
+
17
+ #include <c10/util/Half.h>
18
+
19
+ namespace c10 {
20
+
21
+ namespace detail {
22
+
23
+ /*
24
+ * Convert an 8-bit floating-point number in fp8 E5M2 format, in bit
25
+ * representation, to a 32-bit floating-point number in IEEE single-precision
26
+ * format, in bit representation.
27
+ *
28
+ * @note The implementation doesn't use any floating-point operations.
29
+ */
30
+ inline C10_HOST_DEVICE float fp8e5m2_to_fp32_value(uint8_t input) {
31
+ /*
32
+ * Extend the fp8 E5M2 number to 32 bits and shift to the
33
+ * upper part of the 32-bit word:
34
+ * +---+-----+--+-----------------------------+
35
+ * | S |EEEEE|MM|0000 0000 0000 0000 0000 0000|
36
+ * +---+-----+--+-----------------------------+
37
+ * Bits 31 26-30 24-25 0-23
38
+ *
39
+ * S - sign bit, E - bits of the biased exponent, M - bits of the mantissa, 0
40
+ * - zero bits.
41
+ */
42
+ uint16_t half_representation = input;
43
+ half_representation <<= 8;
44
+ return fp16_ieee_to_fp32_value(half_representation);
45
+ }
46
+
47
+ /*
48
+ * Convert a 32-bit floating-point number in IEEE single-precision format to
49
+ * an 8-bit floating-point number in fp8 E5M2 format, in bit representation.
50
+ */
51
+ inline C10_HOST_DEVICE uint8_t fp8e5m2_from_fp32_value(float f) {
52
+ /*
53
+ * Binary representation of fp32 infinity
54
+ * 0 11111111 00000000000000000000000
55
+ */
56
+ constexpr uint32_t fp32_inf = UINT32_C(255) << 23;
57
+
58
+ /*
59
+ * Binary representation of 65536.0f, which is the first value
60
+ * not representable in fp8e5m2 range:
61
+ * 0 11111 00 - fp8e5m2
62
+ * 0 10001111 00000000000000000000000 - fp32
63
+ */
64
+ constexpr uint32_t fp8_max = UINT32_C(143) << 23;
65
+
66
+ /*
67
+ * A mask for converting fp32 numbers lower than fp8e5m2 normal range
68
+ * into denorm representation
69
+ * magic number: ((127 - 15) + (23 - 2) + 1)
70
+ */
71
+ constexpr uint32_t denorm_mask = UINT32_C(134) << 23;
72
+
73
+ uint32_t f_bits = fp32_to_bits(f);
74
+ uint8_t result = 0u;
75
+
76
+ /*
77
+ * Extract the sign of the input number into the high bit of the 32-bit word:
78
+ *
79
+ * +---+----------------------------------+
80
+ * | S |0000000 00000000 00000000 00000000|
81
+ * +---+----------------------------------+
82
+ * Bits 31 0-31
83
+ */
84
+ const uint32_t sign = f_bits & UINT32_C(0x80000000);
85
+
86
+ /*
87
+ * Set sign bit to 0
88
+ */
89
+ f_bits ^= sign;
90
+
91
+ if (f_bits >= fp8_max) {
92
+ // NaN - all exponent and mantissa bits set to 1
93
+ result = f_bits > fp32_inf ? UINT8_C(0x7F) : UINT8_C(0x7C);
94
+ } else {
95
+ if (f_bits < (UINT32_C(113) << 23)) {
96
+ // Input number is smaller than 2^(-14), which is the smallest
97
+ // fp8e5m2 normal number
98
+ f_bits =
99
+ fp32_to_bits(fp32_from_bits(f_bits) + fp32_from_bits(denorm_mask));
100
+ result = static_cast<uint8_t>(f_bits - denorm_mask);
101
+ } else {
102
+ // resulting mantissa is odd
103
+ uint32_t mant_odd = (f_bits >> 21) & 1;
104
+
105
+ // update exponent, rounding bias part 1
106
+ f_bits += ((uint32_t)(15 - 127) << 23) + 0xFFFFF;
107
+
108
+ // rounding bias part 2
109
+ f_bits += mant_odd;
110
+
111
+ // take the bits!
112
+ result = static_cast<uint8_t>(f_bits >> 21);
113
+ }
114
+ }
115
+
116
+ result |= static_cast<uint8_t>(sign >> 24);
117
+ return result;
118
+ }
119
+
120
+ } // namespace detail
121
+
122
+ struct alignas(1) Float8_e5m2 {
123
+ uint8_t x;
124
+
125
+ struct from_bits_t {};
126
+ C10_HOST_DEVICE static constexpr from_bits_t from_bits() {
127
+ return from_bits_t();
128
+ }
129
+
130
+ Float8_e5m2() = default;
131
+
132
+ constexpr C10_HOST_DEVICE Float8_e5m2(uint8_t bits, from_bits_t) : x(bits) {}
133
+ inline C10_HOST_DEVICE Float8_e5m2(float value);
134
+ inline C10_HOST_DEVICE operator float() const;
135
+ inline C10_HOST_DEVICE bool isnan() const;
136
+ inline C10_HOST_DEVICE bool isinf() const;
137
+ };
138
+
139
+ C10_API std::ostream& operator<<(std::ostream& out, const Float8_e5m2& value);
140
+
141
+ } // namespace c10
142
+
143
+ #include <c10/util/Float8_e5m2-inl.h> // IWYU pragma: keep
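
A small round-trip sketch of the conversions above (the demo function is illustrative):

#include <c10/util/Float8_e5m2.h>
#include <cstdint>
#include <cstdio>

void fp8_e5m2_demo() {
  // Bit-level helpers: fp32 -> fp8 (with rounding) and back.
  uint8_t bits = c10::detail::fp8e5m2_from_fp32_value(3.14159f);
  float back = c10::detail::fp8e5m2_to_fp32_value(bits);
  // With only 2 mantissa bits the nearest representable neighbors of pi are
  // 3.0 and 3.5, so `back` is 3.0f here.
  std::printf("0x%02x -> %g\n", bits, back);

  // The wrapper type routes through the same conversions:
  c10::Float8_e5m2 h(1.5f);        // 1.5 is exactly representable
  float f = static_cast<float>(h); // == 1.5f
  (void)f;
}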
llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2fnuz.h ADDED
@@ -0,0 +1,135 @@
1
+ #pragma once
2
+
3
+ /// Defines the Float8_e5m2fnuz type (8-bit floating-point) including
4
+ /// conversions to standard C types and basic arithmetic operations. Note that
5
+ /// arithmetic operations are implemented by converting to floating point and
6
+ /// performing the operation in float32.
7
+ /// Binary configuration remains the same as e5m2:
8
+ /// s eeeee mm
9
+ /// 1 sign bit
10
+ /// 5 exponent bits
11
+ /// 2 mantissa bits
12
+ /// The key differences that e5m2fnuz brings are:
13
+ /// bias = 16
14
+ /// no infinities or negative zero
15
+ /// NaN only when sign bit is 1, rest all 0s
16
+ ///
17
+ /// Implementation based on the paper https://arxiv.org/pdf/2206.02915.pdf and
18
+ /// the existing Float8_e4m3fn implementation.
19
+
20
+ #include <c10/macros/Macros.h>
21
+ #include <c10/util/TypeSafeSignMath.h>
22
+ #include <c10/util/floating_point_utils.h>
23
+
24
+ #if defined(__cplusplus) && (__cplusplus >= 201103L)
25
+ #include <cstdint>
26
+ #elif !defined(__OPENCL_VERSION__)
27
+ #include <math.h>
28
+ #include <stdint.h>
29
+ #endif
30
+
31
+ #include <iosfwd>
32
+ #include <ostream>
33
+
34
+ namespace c10 {
35
+
36
+ namespace detail {
37
+
38
+ /*
39
+ * Convert a 32-bit floating-point number in IEEE single-precision format to
40
+ * an 8-bit floating-point number in fp8 E5M2FNUZ format, in bit representation.
41
+ */
42
+ inline C10_HOST_DEVICE uint8_t fp8e5m2fnuz_from_fp32_value(float f) {
43
+ /*
44
+ * Binary representation of 65536.0f, which is the first value not
45
+ * representable (i.e. the first value which would overflow into the sign
46
+ * bit, resulting in a NaN) in the fp8e5m2fnuz range:
47
+ * 1 00000 00 - fp8e5m2fnuz
48
+ * 0 10001111 00000000000000000000000 - fp32
49
+ */
50
+ constexpr uint32_t fnuz_max = UINT32_C(0x8F) << 23;
51
+
52
+ /*
53
+ * A mask for converting fp32 numbers lower than fp8e5m2fnuz normal range
54
+ * into denormalized representation.
55
+ * magic number: ((127 - 16) + (23 - 2) + 1)
56
+ */
57
+ constexpr uint32_t denorm_mask = UINT32_C(0x85) << 23;
58
+
59
+ uint32_t f_bits = fp32_to_bits(f);
60
+ uint32_t result = 0u;
61
+
62
+ /*
63
+ * Extract the sign of the input number into the high bit of the 32-bit word:
64
+ *
65
+ * +---+----------------------------------+
66
+ * | S |0000000 00000000 00000000 00000000|
67
+ * +---+----------------------------------+
68
+ * Bits 31 0-31
69
+ */
70
+ const uint32_t sign = f_bits & UINT32_C(0x80000000);
71
+
72
+ /*
73
+ * Set sign bit to 0
74
+ */
75
+ f_bits ^= sign;
76
+
77
+ if (f_bits >= fnuz_max) {
78
+ // NaN -- sign bit set to 1, rest 0s
79
+ return 0x80;
80
+ }
81
+
82
+ if (f_bits < (UINT32_C(0x70) << 23) /* 2^-15 in float32 */) {
83
+ // Input exponent is less than -15, the smallest e5m2fnuz exponent, so the
84
+ // number will become subnormal.
85
+ f_bits = fp32_to_bits(fp32_from_bits(f_bits) + fp32_from_bits(denorm_mask));
86
+ result = static_cast<uint8_t>(f_bits - denorm_mask);
87
+ if (result == 0) {
88
+ // fnuz types don't have negative zero.
89
+ return 0;
90
+ }
91
+ } else {
92
+ // resulting mantissa is odd
93
+ uint8_t mant_odd = (f_bits >> 21) & 1;
94
+
95
+ // update exponent, rounding bias part 1
96
+ f_bits += ((uint32_t)(16 - 127) << 23) + 0xFFFFF;
97
+
98
+ // rounding bias part 2
99
+ f_bits += mant_odd;
100
+
101
+ // take the bits!
102
+ result = static_cast<uint8_t>(f_bits >> 21);
103
+ }
104
+
105
+ result |= sign >> 24;
106
+ return result;
107
+ }
108
+
109
+ } // namespace detail
110
+
111
+ struct alignas(1) Float8_e5m2fnuz {
112
+ uint8_t x;
113
+
114
+ struct from_bits_t {};
115
+ C10_HOST_DEVICE static constexpr from_bits_t from_bits() {
116
+ return from_bits_t();
117
+ }
118
+
119
+ Float8_e5m2fnuz() = default;
120
+
121
+ constexpr C10_HOST_DEVICE Float8_e5m2fnuz(uint8_t bits, from_bits_t)
122
+ : x(bits) {}
123
+ inline C10_HOST_DEVICE Float8_e5m2fnuz(float value);
124
+ inline C10_HOST_DEVICE operator float() const;
125
+ inline C10_HOST_DEVICE bool isnan() const;
126
+ inline C10_HOST_DEVICE bool isinf() const;
127
+ };
128
+
129
+ C10_API std::ostream& operator<<(
130
+ std::ostream& out,
131
+ const Float8_e5m2fnuz& value);
132
+
133
+ } // namespace c10
134
+
135
+ #include <c10/util/Float8_e5m2fnuz-inl.h> // IWYU pragma: keep
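
Two quick checks of the fnuz-specific behavior called out above, i.e. no negative zero and a single NaN encoding in place of infinities (illustrative):

#include <c10/util/Float8_e5m2fnuz.h>
#include <cassert>

void fp8_e5m2fnuz_demo() {
  // -0.0f maps to the positive-zero encoding: fnuz has no negative zero.
  assert(c10::detail::fp8e5m2fnuz_from_fp32_value(-0.0f) == 0x00);
  // Values at or beyond 65536.0f saturate to NaN (sign bit set, rest zero).
  assert(c10::detail::fp8e5m2fnuz_from_fp32_value(1e9f) == 0x80);
}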
llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/IdWrapper.h ADDED
@@ -0,0 +1,77 @@
1
+ #pragma once
2
+
3
+ #include <cstddef>
4
+ #include <functional>
5
+ #include <utility>
6
+
7
+ namespace c10 {
8
+
9
+ /**
10
+ * This template simplifies generation of simple classes that wrap an id
11
+ * in a typesafe way. Namely, you can use it to create a very lightweight
12
+ * type that only offers equality comparators and hashing. Example:
13
+ *
14
+ * struct MyIdType final : IdWrapper<MyIdType, uint32_t> {
15
+ * constexpr explicit MyIdType(uint32_t id): IdWrapper(id) {}
16
+ * };
17
+ *
18
+ * Then in the global top level namespace:
19
+ *
20
+ * C10_DEFINE_HASH_FOR_IDWRAPPER(MyIdType);
21
+ *
22
+ * That's it - equality operators and hash functions are automatically defined
23
+ * for you, given the underlying type supports it.
24
+ */
25
+ template <class ConcreteType, class UnderlyingType>
26
+ class IdWrapper {
27
+ public:
28
+ using underlying_type = UnderlyingType;
29
+ using concrete_type = ConcreteType;
30
+
31
+ protected:
32
+ constexpr explicit IdWrapper(underlying_type id) noexcept(
33
+ noexcept(underlying_type(std::declval<underlying_type>())))
34
+ : id_(id) {}
35
+
36
+ constexpr underlying_type underlyingId() const
37
+ noexcept(noexcept(underlying_type(std::declval<underlying_type>()))) {
38
+ return id_;
39
+ }
40
+
41
+ private:
42
+ friend size_t hash_value(const concrete_type& v) {
43
+ return std::hash<underlying_type>()(v.id_);
44
+ }
45
+
46
+ // TODO Making operator== noexcept if underlying type is noexcept equality
47
+ // comparable doesn't work with GCC 4.8.
48
+ // Fix this once we don't need GCC 4.8 anymore.
49
+ friend constexpr bool operator==(
50
+ const concrete_type& lhs,
51
+ const concrete_type& rhs) noexcept {
52
+ return lhs.id_ == rhs.id_;
53
+ }
54
+
55
+ // TODO Making operator!= noexcept if operator== is noexcept doesn't work with
56
+ // GCC 4.8.
57
+ // Fix this once we don't need GCC 4.8 anymore.
58
+ friend constexpr bool operator!=(
59
+ const concrete_type& lhs,
60
+ const concrete_type& rhs) noexcept {
61
+ return !(lhs == rhs);
62
+ }
63
+
64
+ underlying_type id_;
65
+ };
66
+
67
+ } // namespace c10
68
+
69
+ #define C10_DEFINE_HASH_FOR_IDWRAPPER(ClassName) \
70
+ namespace std { \
71
+ template <> \
72
+ struct hash<ClassName> { \
73
+ size_t operator()(ClassName x) const { \
74
+ return hash_value(x); \
75
+ } \
76
+ }; \
77
+ }
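
Fleshing out the example from the doc comment above into a compilable sketch (MyIdType and the demo function are illustrative):

#include <c10/util/IdWrapper.h>
#include <cstdint>
#include <unordered_set>

struct MyIdType final : c10::IdWrapper<MyIdType, uint32_t> {
  constexpr explicit MyIdType(uint32_t id) : IdWrapper(id) {}
};

// Must appear at global namespace scope, as documented above:
C10_DEFINE_HASH_FOR_IDWRAPPER(MyIdType);

void id_wrapper_demo() {
  std::unordered_set<MyIdType> seen;
  seen.insert(MyIdType(1));
  // Equality and hashing come from the wrapper; no extra boilerplate needed.
  bool duplicate = !seen.insert(MyIdType(1)).second; // true
  (void)duplicate;
}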
llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/LeftRight.h ADDED
@@ -0,0 +1,223 @@
1
+ #include <c10/macros/Macros.h>
2
+ #include <c10/util/Synchronized.h>
3
+ #include <array>
4
+ #include <atomic>
5
+ #include <mutex>
6
+ #include <thread>
7
+
8
+ namespace c10 {
9
+
10
+ namespace detail {
11
+
12
+ struct IncrementRAII final {
13
+ public:
14
+ explicit IncrementRAII(std::atomic<int32_t>* counter) : _counter(counter) {
15
+ _counter->fetch_add(1);
16
+ }
17
+
18
+ ~IncrementRAII() {
19
+ _counter->fetch_sub(1);
20
+ }
21
+
22
+ private:
23
+ std::atomic<int32_t>* _counter;
24
+
25
+ C10_DISABLE_COPY_AND_ASSIGN(IncrementRAII);
26
+ };
27
+
28
+ } // namespace detail
29
+
30
+ // LeftRight wait-free readers synchronization primitive
31
+ // https://hal.archives-ouvertes.fr/hal-01207881/document
32
+ //
33
+ // LeftRight is quite easy to use (it can make an arbitrary
34
+ // data structure permit wait-free reads), but it has some
35
+ // particular performance characteristics you should be aware
36
+ // of if you're deciding to use it:
37
+ //
38
+ // - Reads still incur an atomic write (this is how LeftRight
39
+ // keeps track of how long it needs to keep around the old
40
+ // data structure)
41
+ //
42
+ // - Writes get executed twice, to keep both the left and right
43
+ // versions up to date. So if your write is expensive or
44
+ // nondeterministic, this is also an inappropriate structure
45
+ //
46
+ // LeftRight is used fairly rarely in PyTorch's codebase. If you
47
+ // are still not sure if you need it or not, consult your local
48
+ // C++ expert.
49
+ //
50
+ template <class T>
51
+ class LeftRight final {
52
+ public:
53
+ template <class... Args>
54
+ explicit LeftRight(const Args&... args)
55
+ : _counters{{{0}, {0}}},
56
+ _foregroundCounterIndex(0),
57
+ _foregroundDataIndex(0),
58
+ _data{{T{args...}, T{args...}}},
59
+ _writeMutex() {}
60
+
61
+ // Copying and moving would not be threadsafe.
62
+ // Needs more thought and careful design to make that work.
63
+ LeftRight(const LeftRight&) = delete;
64
+ LeftRight(LeftRight&&) noexcept = delete;
65
+ LeftRight& operator=(const LeftRight&) = delete;
66
+ LeftRight& operator=(LeftRight&&) noexcept = delete;
67
+
68
+ ~LeftRight() {
69
+ // wait until any potentially running writers are finished
70
+ { std::unique_lock<std::mutex> lock(_writeMutex); }
71
+
72
+ // wait until any potentially running readers are finished
73
+ while (_counters[0].load() != 0 || _counters[1].load() != 0) {
74
+ std::this_thread::yield();
75
+ }
76
+ }
77
+
78
+ template <typename F>
79
+ auto read(F&& readFunc) const {
80
+ detail::IncrementRAII _increment_counter(
81
+ &_counters[_foregroundCounterIndex.load()]);
82
+
83
+ return std::forward<F>(readFunc)(_data[_foregroundDataIndex.load()]);
84
+ }
85
+
86
+ // Throwing an exception in writeFunc is ok but causes the state to be either
87
+ // the old or the new state, depending on if the first or the second call to
88
+ // writeFunc threw.
89
+ template <typename F>
90
+ auto write(F&& writeFunc) {
91
+ std::unique_lock<std::mutex> lock(_writeMutex);
92
+
93
+ return _write(std::forward<F>(writeFunc));
94
+ }
95
+
96
+ private:
97
+ template <class F>
98
+ auto _write(const F& writeFunc) {
99
+ /*
100
+ * Assume, A is in background and B in foreground. In simplified terms, we
101
+ * want to do the following:
102
+ * 1. Write to A (old background)
103
+ * 2. Switch A/B
104
+ * 3. Write to B (new background)
105
+ *
106
+ * More detailed algorithm (explanations on why this is important are below
107
+ * in code):
108
+ * 1. Write to A
109
+ * 2. Switch A/B data pointers
110
+ * 3. Wait until A counter is zero
111
+ * 4. Switch A/B counters
112
+ * 5. Wait until B counter is zero
113
+ * 6. Write to B
114
+ */
115
+
116
+ auto localDataIndex = _foregroundDataIndex.load();
117
+
118
+ // 1. Write to A
119
+ _callWriteFuncOnBackgroundInstance(writeFunc, localDataIndex);
120
+
121
+ // 2. Switch A/B data pointers
122
+ localDataIndex = localDataIndex ^ 1;
123
+ _foregroundDataIndex = localDataIndex;
124
+
125
+ /*
126
+ * 3. Wait until A counter is zero
127
+ *
128
+ * In the previous write run, A was foreground and B was background.
129
+ * There was a time after switching _foregroundDataIndex (B to foreground)
130
+ * and before switching _foregroundCounterIndex, in which new readers could
131
+ * have read B but incremented A's counter.
132
+ *
133
+ * In this current run, we just switched _foregroundDataIndex (A back to
134
+ * foreground), but before writing to the new background B, we have to make
135
+ * sure A's counter was zero briefly, so all these old readers are gone.
136
+ */
137
+ auto localCounterIndex = _foregroundCounterIndex.load();
138
+ _waitForBackgroundCounterToBeZero(localCounterIndex);
139
+
140
+ /*
141
+ * 4. Switch A/B counters
142
+ *
143
+ * Now that we know all readers on B are really gone, we can switch the
144
+ * counters and have new readers increment A's counter again, which is the
145
+ * correct counter since they're reading A.
146
+ */
147
+ localCounterIndex = localCounterIndex ^ 1;
148
+ _foregroundCounterIndex = localCounterIndex;
149
+
150
+ /*
151
+ * 5. Wait until B counter is zero
152
+ *
153
+ * This waits for all the readers on B that came in while both data and
154
+ * counter for B was in foreground, i.e. normal readers that happened
155
+ * outside of that brief gap between switching data and counter.
156
+ */
157
+ _waitForBackgroundCounterToBeZero(localCounterIndex);
158
+
159
+ // 6. Write to B
160
+ return _callWriteFuncOnBackgroundInstance(writeFunc, localDataIndex);
161
+ }
162
+
163
+ template <class F>
164
+ auto _callWriteFuncOnBackgroundInstance(
165
+ const F& writeFunc,
166
+ uint8_t localDataIndex) {
167
+ try {
168
+ return writeFunc(_data[localDataIndex ^ 1]);
169
+ } catch (...) {
170
+ // recover invariant by copying from the foreground instance
171
+ _data[localDataIndex ^ 1] = _data[localDataIndex];
172
+ // rethrow
173
+ throw;
174
+ }
175
+ }
176
+
177
+ void _waitForBackgroundCounterToBeZero(uint8_t counterIndex) {
178
+ while (_counters[counterIndex ^ 1].load() != 0) {
179
+ std::this_thread::yield();
180
+ }
181
+ }
182
+
183
+ mutable std::array<std::atomic<int32_t>, 2> _counters;
184
+ std::atomic<uint8_t> _foregroundCounterIndex;
185
+ std::atomic<uint8_t> _foregroundDataIndex;
186
+ std::array<T, 2> _data;
187
+ std::mutex _writeMutex;
188
+ };
189
+
190
+ // RWSafeLeftRightWrapper is API compatible with LeftRight and uses a
191
+ // read-write lock to protect T (data).
192
+ template <class T>
193
+ class RWSafeLeftRightWrapper final {
194
+ public:
195
+ template <class... Args>
196
+ explicit RWSafeLeftRightWrapper(const Args&... args) : data_{args...} {}
197
+
198
+ // RWSafeLeftRightWrapper is not copyable or moveable since LeftRight
199
+ // is not copyable or moveable.
200
+ RWSafeLeftRightWrapper(const RWSafeLeftRightWrapper&) = delete;
201
+ RWSafeLeftRightWrapper(RWSafeLeftRightWrapper&&) noexcept = delete;
202
+ RWSafeLeftRightWrapper& operator=(const RWSafeLeftRightWrapper&) = delete;
203
+ RWSafeLeftRightWrapper& operator=(RWSafeLeftRightWrapper&&) noexcept = delete;
204
+
205
+ template <typename F>
206
+ // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
207
+ auto read(F&& readFunc) const {
208
+ return data_.withLock(
209
+ [&readFunc](T const& data) { return std::forward<F>(readFunc)(data); });
210
+ }
211
+
212
+ template <typename F>
213
+ // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward)
214
+ auto write(F&& writeFunc) {
215
+ return data_.withLock(
216
+ [&writeFunc](T& data) { return std::forward<F>(writeFunc)(data); });
217
+ }
218
+
219
+ private:
220
+ c10::Synchronized<T> data_;
221
+ };
222
+
223
+ } // namespace c10
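
A usage sketch for the wrapper (the demo function is illustrative):

#include <c10/util/LeftRight.h>
#include <vector>

void left_right_demo() {
  c10::LeftRight<std::vector<int>> table;

  // Writes are serialized by a mutex and applied to both copies in turn:
  table.write([](std::vector<int>& v) { v.push_back(42); });

  // Reads are wait-free, though they still perform the atomic counter write
  // mentioned above; the lambda sees the current foreground copy:
  int last = table.read([](const std::vector<int>& v) { return v.back(); });
  (void)last;
}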
llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Optional.h ADDED
@@ -0,0 +1,48 @@
1
+ #ifndef C10_UTIL_OPTIONAL_H_
2
+ #define C10_UTIL_OPTIONAL_H_
3
+
4
+ #include <optional>
5
+ #include <type_traits>
6
+
7
+ // Macros.h is not needed, but it does namespace shenanigans that lots
8
+ // of downstream code seems to rely on. Feel free to remove it and fix
9
+ // up builds.
10
+
11
+ namespace c10 {
12
+ // NOLINTNEXTLINE(misc-unused-using-decls)
13
+ using std::bad_optional_access;
14
+ // NOLINTNEXTLINE(misc-unused-using-decls)
15
+ using std::make_optional;
16
+ // NOLINTNEXTLINE(misc-unused-using-decls)
17
+ using std::nullopt;
18
+ // NOLINTNEXTLINE(misc-unused-using-decls)
19
+ using std::nullopt_t;
20
+ // NOLINTNEXTLINE(misc-unused-using-decls)
21
+ using std::optional;
22
+
23
+ namespace detail_ {
24
+ // The call to convert<A>(b) has return type A and converts b to type A iff
25
+ // decltype(b) is implicitly convertible to A.
26
+ template <class U>
27
+ constexpr U convert(U v) {
28
+ return v;
29
+ }
30
+ } // namespace detail_
31
+ template <class T, class F>
32
+ constexpr T value_or_else(const optional<T>& v, F&& func) {
33
+ static_assert(
34
+ std::is_convertible_v<typename std::invoke_result_t<F>, T>,
35
+ "func parameter must be a callable that returns a type convertible to the value stored in the optional");
36
+ return v.has_value() ? *v : detail_::convert<T>(std::forward<F>(func)());
37
+ }
38
+
39
+ template <class T, class F>
40
+ constexpr T value_or_else(optional<T>&& v, F&& func) {
41
+ static_assert(
42
+ std::is_convertible_v<typename std::invoke_result_t<F>, T>,
43
+ "func parameters must be a callable that returns a type convertible to the value stored in the optional");
44
+ return v.has_value() ? constexpr_move(std::move(v).contained_val())
45
+ : detail_::convert<T>(std::forward<F>(func)());
46
+ }
47
+ } // namespace c10
48
+ #endif // C10_UTIL_OPTIONAL_H_
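
A brief sketch of how value_or_else differs from std::optional::value_or (illustrative, not part of the diff): the fallback is a callable, so it is only invoked when the optional is empty, avoiding eager construction of the default value:

#include <c10/util/Optional.h>
#include <string>

std::string display_name(const c10::optional<std::string>& name) {
  // The lambda runs only when `name` is empty; value_or("anonymous")
  // would construct the fallback string on every call.
  return c10::value_or_else(name, [] { return std::string("anonymous"); });
}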
llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/OptionalArrayRef.h ADDED
@@ -0,0 +1,236 @@
+// This file defines OptionalArrayRef<T>, a class that has almost exactly the
+// same functionality as c10::optional<ArrayRef<T>>, except that its
+// converting constructor fixes a dangling pointer issue.
+//
+// The implicit converting constructor of both c10::optional<ArrayRef<T>> and
+// std::optional<ArrayRef<T>> can cause the underlying ArrayRef<T> to store
+// a dangling pointer. OptionalArrayRef<T> prevents this by wrapping
+// a c10::optional<ArrayRef<T>> and fixing the constructor implementation.
+//
+// See https://github.com/pytorch/pytorch/issues/63645 for more on this.
+
+#pragma once
+
+#include <c10/util/ArrayRef.h>
+#include <c10/util/Optional.h>
+#include <cstdint>
+#include <initializer_list>
+#include <type_traits>
+#include <utility>
+
+namespace c10 {
+
+template <typename T>
+class OptionalArrayRef final {
+ public:
+  // Constructors
+
+  constexpr OptionalArrayRef() noexcept = default;
+
+  constexpr OptionalArrayRef(nullopt_t) noexcept {}
+
+  OptionalArrayRef(const OptionalArrayRef& other) = default;
+
+  OptionalArrayRef(OptionalArrayRef&& other) noexcept = default;
+
+  constexpr OptionalArrayRef(const optional<ArrayRef<T>>& other) noexcept
+      : wrapped_opt_array_ref(other) {}
+
+  constexpr OptionalArrayRef(optional<ArrayRef<T>>&& other) noexcept
+      : wrapped_opt_array_ref(std::move(other)) {}
+
+  constexpr OptionalArrayRef(const T& value) noexcept
+      : wrapped_opt_array_ref(value) {}
+
+  template <
+      typename U = ArrayRef<T>,
+      std::enable_if_t<
+          !std::is_same_v<std::decay_t<U>, OptionalArrayRef> &&
+              !std::is_same_v<std::decay_t<U>, std::in_place_t> &&
+              std::is_constructible_v<ArrayRef<T>, U&&> &&
+              std::is_convertible_v<U&&, ArrayRef<T>> &&
+              !std::is_convertible_v<U&&, T>,
+          bool> = false>
+  constexpr OptionalArrayRef(U&& value) noexcept(
+      std::is_nothrow_constructible_v<ArrayRef<T>, U&&>)
+      : wrapped_opt_array_ref(std::forward<U>(value)) {}
+
+  template <
+      typename U = ArrayRef<T>,
+      std::enable_if_t<
+          !std::is_same_v<std::decay_t<U>, OptionalArrayRef> &&
+              !std::is_same_v<std::decay_t<U>, std::in_place_t> &&
+              std::is_constructible_v<ArrayRef<T>, U&&> &&
+              !std::is_convertible_v<U&&, ArrayRef<T>>,
+          bool> = false>
+  constexpr explicit OptionalArrayRef(U&& value) noexcept(
+      std::is_nothrow_constructible_v<ArrayRef<T>, U&&>)
+      : wrapped_opt_array_ref(std::forward<U>(value)) {}
+
+  template <typename... Args>
+  constexpr explicit OptionalArrayRef(
+      std::in_place_t ip,
+      Args&&... args) noexcept
+      : wrapped_opt_array_ref(ip, std::forward<Args>(args)...) {}
+
+  template <typename U, typename... Args>
+  constexpr explicit OptionalArrayRef(
+      std::in_place_t ip,
+      std::initializer_list<U> il,
+      Args&&... args)
+      : wrapped_opt_array_ref(ip, il, std::forward<Args>(args)...) {}
+
+  constexpr OptionalArrayRef(const std::initializer_list<T>& Vec)
+      : wrapped_opt_array_ref(ArrayRef<T>(Vec)) {}
+
+  // Destructor
+
+  ~OptionalArrayRef() = default;
+
+  // Assignment
+
+  constexpr OptionalArrayRef& operator=(nullopt_t) noexcept {
+    wrapped_opt_array_ref = c10::nullopt;
+    return *this;
+  }
+
+  OptionalArrayRef& operator=(const OptionalArrayRef& other) = default;
+
+  OptionalArrayRef& operator=(OptionalArrayRef&& other) noexcept = default;
+
+  constexpr OptionalArrayRef& operator=(
+      const optional<ArrayRef<T>>& other) noexcept {
+    wrapped_opt_array_ref = other;
+    return *this;
+  }
+
+  constexpr OptionalArrayRef& operator=(
+      optional<ArrayRef<T>>&& other) noexcept {
+    wrapped_opt_array_ref = std::move(other);
+    return *this;
+  }
+
+  template <
+      typename U = ArrayRef<T>,
+      typename = std::enable_if_t<
+          !std::is_same_v<std::decay_t<U>, OptionalArrayRef> &&
+          std::is_constructible_v<ArrayRef<T>, U&&> &&
+          std::is_assignable_v<ArrayRef<T>&, U&&>>>
+  constexpr OptionalArrayRef& operator=(U&& value) noexcept(
+      std::is_nothrow_constructible_v<ArrayRef<T>, U&&> &&
+      std::is_nothrow_assignable_v<ArrayRef<T>&, U&&>) {
+    wrapped_opt_array_ref = std::forward<U>(value);
+    return *this;
+  }
+
+  // Observers
+
+  constexpr ArrayRef<T>* operator->() noexcept {
+    return &wrapped_opt_array_ref.value();
+  }
+
+  constexpr const ArrayRef<T>* operator->() const noexcept {
+    return &wrapped_opt_array_ref.value();
+  }
+
+  constexpr ArrayRef<T>& operator*() & noexcept {
+    return wrapped_opt_array_ref.value();
+  }
+
+  constexpr const ArrayRef<T>& operator*() const& noexcept {
+    return wrapped_opt_array_ref.value();
+  }
+
+  constexpr ArrayRef<T>&& operator*() && noexcept {
+    return std::move(wrapped_opt_array_ref.value());
+  }
+
+  constexpr const ArrayRef<T>&& operator*() const&& noexcept {
+    return std::move(wrapped_opt_array_ref.value());
+  }
+
+  constexpr explicit operator bool() const noexcept {
+    return wrapped_opt_array_ref.has_value();
+  }
+
+  constexpr bool has_value() const noexcept {
+    return wrapped_opt_array_ref.has_value();
+  }
+
+  constexpr ArrayRef<T>& value() & {
+    return wrapped_opt_array_ref.value();
+  }
+
+  constexpr const ArrayRef<T>& value() const& {
+    return wrapped_opt_array_ref.value();
+  }
+
+  constexpr ArrayRef<T>&& value() && {
+    return std::move(wrapped_opt_array_ref.value());
+  }
+
+  constexpr const ArrayRef<T>&& value() const&& {
+    return std::move(wrapped_opt_array_ref.value());
+  }
+
+  template <typename U>
+  constexpr std::
+      enable_if_t<std::is_convertible_v<U&&, ArrayRef<T>>, ArrayRef<T>>
+      value_or(U&& default_value) const& {
+    return wrapped_opt_array_ref.value_or(std::forward<U>(default_value));
+  }
+
+  template <typename U>
+  constexpr std::
+      enable_if_t<std::is_convertible_v<U&&, ArrayRef<T>>, ArrayRef<T>>
+      value_or(U&& default_value) && {
+    return wrapped_opt_array_ref.value_or(std::forward<U>(default_value));
+  }
+
+  // Modifiers
+
+  constexpr void swap(OptionalArrayRef& other) noexcept {
+    std::swap(wrapped_opt_array_ref, other.wrapped_opt_array_ref);
+  }
+
+  constexpr void reset() noexcept {
+    wrapped_opt_array_ref.reset();
+  }
+
+  template <typename... Args>
+  constexpr std::
+      enable_if_t<std::is_constructible_v<ArrayRef<T>, Args&&...>, ArrayRef<T>&>
+      emplace(Args&&... args) noexcept(
+          std::is_nothrow_constructible_v<ArrayRef<T>, Args&&...>) {
+    return wrapped_opt_array_ref.emplace(std::forward<Args>(args)...);
+  }
+
+  template <typename U, typename... Args>
+  constexpr ArrayRef<T>& emplace(
+      std::initializer_list<U> il,
+      Args&&... args) noexcept {
+    return wrapped_opt_array_ref.emplace(il, std::forward<Args>(args)...);
+  }
+
+ private:
+  optional<ArrayRef<T>> wrapped_opt_array_ref;
+};
+
+using OptionalIntArrayRef = OptionalArrayRef<int64_t>;
+
+inline bool operator==(
+    const OptionalIntArrayRef& a1,
+    const IntArrayRef& other) {
+  if (!a1.has_value()) {
+    return false;
+  }
+  return a1.value() == other;
+}
+
+inline bool operator==(
+    const c10::IntArrayRef& a1,
+    const c10::OptionalIntArrayRef& a2) {
+  return a2 == a1;
+}
+
+} // namespace c10
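
As a usage sketch (not part of the diff; names are illustrative), OptionalArrayRef is a drop-in for optional<ArrayRef<T>> at API boundaries. Like ArrayRef itself it is a non-owning view, so the referenced storage must outlive the call:

#include <c10/util/OptionalArrayRef.h>
#include <vector>

int64_t sum_or_zero(c10::OptionalIntArrayRef sizes) {
  if (!sizes.has_value()) {
    return 0;
  }
  int64_t total = 0;
  for (auto s : *sizes) {  // operator*() yields the underlying ArrayRef
    total += s;
  }
  return total;
}

int64_t demo() {
  std::vector<int64_t> v{1, 2, 3};
  // The vector outlives both calls, so the ArrayRef view stays valid.
  return sum_or_zero(v) + sum_or_zero(c10::nullopt);
}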
llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Registry.h ADDED
@@ -0,0 +1,326 @@
+#ifndef C10_UTIL_REGISTRY_H_
+#define C10_UTIL_REGISTRY_H_
+
+/**
+ * Simple registry implementation that uses static variables to
+ * register object creators during program initialization time.
+ */
+
+// NB: This Registry works poorly when you have other namespaces.
+// Make all macro invocations from inside the at namespace.
+
+#include <cstdio>
+#include <cstdlib>
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <stdexcept>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include <c10/macros/Export.h>
+#include <c10/macros/Macros.h>
+#include <c10/util/Type.h>
+
+namespace c10 {
+
+template <typename KeyType>
+inline std::string KeyStrRepr(const KeyType& /*key*/) {
+  return "[key type printing not supported]";
+}
+
+template <>
+inline std::string KeyStrRepr(const std::string& key) {
+  return key;
+}
+
+enum RegistryPriority {
+  REGISTRY_FALLBACK = 1,
+  REGISTRY_DEFAULT = 2,
+  REGISTRY_PREFERRED = 3,
+};
+
+/**
+ * @brief A template class that allows one to register classes by keys.
+ *
+ * The keys are usually a std::string specifying the name, but can be anything
+ * that can be used in a std::map.
+ *
+ * You should most likely not use the Registry class explicitly, but use the
+ * helper macros below to declare specific registries as well as registering
+ * objects.
+ */
+template <class SrcType, class ObjectPtrType, class... Args>
+class Registry {
+ public:
+  typedef std::function<ObjectPtrType(Args...)> Creator;
+
+  Registry(bool warning = true) : registry_(), priority_(), warning_(warning) {}
+
+  void Register(
+      const SrcType& key,
+      Creator creator,
+      const RegistryPriority priority = REGISTRY_DEFAULT) {
+    std::lock_guard<std::mutex> lock(register_mutex_);
+    // The if statement below is essentially the same as the following line:
+    // TORCH_CHECK_EQ(registry_.count(key), 0) << "Key " << key
+    //   << " registered twice.";
+    // However, TORCH_CHECK_EQ depends on google logging, and since registration
+    // is carried out at static initialization time, we do not want to have an
+    // explicit dependency on glog's initialization function.
+    if (registry_.count(key) != 0) {
+      auto cur_priority = priority_[key];
+      if (priority > cur_priority) {
+#ifdef DEBUG
+        std::string warn_msg =
+            "Overwriting already registered item for key " + KeyStrRepr(key);
+        fprintf(stderr, "%s\n", warn_msg.c_str());
+#endif
+        registry_[key] = creator;
+        priority_[key] = priority;
+      } else if (priority == cur_priority) {
+        std::string err_msg =
+            "Key already registered with the same priority: " + KeyStrRepr(key);
+        fprintf(stderr, "%s\n", err_msg.c_str());
+        if (terminate_) {
+          std::exit(1);
+        } else {
+          throw std::runtime_error(err_msg);
+        }
+      } else if (warning_) {
+        std::string warn_msg =
+            "Higher priority item already registered, skipping registration of " +
+            KeyStrRepr(key);
+        fprintf(stderr, "%s\n", warn_msg.c_str());
+      }
+    } else {
+      registry_[key] = creator;
+      priority_[key] = priority;
+    }
+  }
+
+  void Register(
+      const SrcType& key,
+      Creator creator,
+      const std::string& help_msg,
+      const RegistryPriority priority = REGISTRY_DEFAULT) {
+    Register(key, creator, priority);
+    help_message_[key] = help_msg;
+  }
+
+  inline bool Has(const SrcType& key) {
+    return (registry_.count(key) != 0);
+  }
+
+  ObjectPtrType Create(const SrcType& key, Args... args) {
+    auto it = registry_.find(key);
+    if (it == registry_.end()) {
+      // Returns nullptr if the key is not registered.
+      return nullptr;
+    }
+    return it->second(args...);
+  }
+
+  /**
+   * Returns the keys currently registered as a std::vector.
+   */
+  std::vector<SrcType> Keys() const {
+    std::vector<SrcType> keys;
+    keys.reserve(registry_.size());
+    for (const auto& it : registry_) {
+      keys.push_back(it.first);
+    }
+    return keys;
+  }
+
+  inline const std::unordered_map<SrcType, std::string>& HelpMessage() const {
+    return help_message_;
+  }
+
+  const char* HelpMessage(const SrcType& key) const {
+    auto it = help_message_.find(key);
+    if (it == help_message_.end()) {
+      return nullptr;
+    }
+    return it->second.c_str();
+  }
+
+  // Used for testing, if terminate is unset, Registry throws instead of
+  // calling std::exit
+  void SetTerminate(bool terminate) {
+    terminate_ = terminate;
+  }
+
+ private:
+  std::unordered_map<SrcType, Creator> registry_;
+  std::unordered_map<SrcType, RegistryPriority> priority_;
+  bool terminate_{true};
+  const bool warning_;
+  std::unordered_map<SrcType, std::string> help_message_;
+  std::mutex register_mutex_;
+
+  C10_DISABLE_COPY_AND_ASSIGN(Registry);
+};
+
+template <class SrcType, class ObjectPtrType, class... Args>
+class Registerer {
+ public:
+  explicit Registerer(
+      const SrcType& key,
+      Registry<SrcType, ObjectPtrType, Args...>* registry,
+      typename Registry<SrcType, ObjectPtrType, Args...>::Creator creator,
+      const std::string& help_msg = "") {
+    registry->Register(key, creator, help_msg);
+  }
+
+  explicit Registerer(
+      const SrcType& key,
+      const RegistryPriority priority,
+      Registry<SrcType, ObjectPtrType, Args...>* registry,
+      typename Registry<SrcType, ObjectPtrType, Args...>::Creator creator,
+      const std::string& help_msg = "") {
+    registry->Register(key, creator, help_msg, priority);
+  }
+
+  template <class DerivedType>
+  static ObjectPtrType DefaultCreator(Args... args) {
+    return ObjectPtrType(new DerivedType(args...));
+  }
+};
+
+/**
+ * C10_DECLARE_TYPED_REGISTRY is a macro that expands to a function
+ * declaration, as well as creating a convenient typename for its corresponding
+ * registerer.
+ */
+// Note on C10_IMPORT and C10_EXPORT below: we need to explicitly mark DECLARE
+// as import and DEFINE as export, because these registry macros will be used
+// in downstream shared libraries as well, and one cannot use *_API - the API
+// macro will be defined on a per-shared-library basis. Semantically, when one
+// declares a typed registry it is always going to be IMPORT, and when one
+// defines a registry (which should happen ONLY ONCE and ONLY IN SOURCE FILE),
+// the instantiation unit is always going to be exported.
+//
+// The only unique condition is when in the same file one does DECLARE and
+// DEFINE - with Windows compilers, this generates a warning that dllimport and
+// dllexport are mixed, but the warning is fine and the linker will properly
+// export the symbol. The same thing happens in the gflags flag declaration and
+// definition cases.
+#define C10_DECLARE_TYPED_REGISTRY(                                      \
+    RegistryName, SrcType, ObjectType, PtrType, ...)                     \
+  C10_API ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>*  \
+  RegistryName();                                                        \
+  typedef ::c10::Registerer<SrcType, PtrType<ObjectType>, ##__VA_ARGS__> \
+      Registerer##RegistryName
+
+#define TORCH_DECLARE_TYPED_REGISTRY(                                     \
+    RegistryName, SrcType, ObjectType, PtrType, ...)                      \
+  TORCH_API ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>* \
+  RegistryName();                                                         \
+  typedef ::c10::Registerer<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>  \
+      Registerer##RegistryName
+
+#define C10_DEFINE_TYPED_REGISTRY(                                         \
+    RegistryName, SrcType, ObjectType, PtrType, ...)                       \
+  C10_EXPORT ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>* \
+  RegistryName() {                                                         \
+    static ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>*   \
+        registry = new ::c10::                                             \
+            Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>();       \
+    return registry;                                                       \
+  }
+
+#define C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING(                            \
+    RegistryName, SrcType, ObjectType, PtrType, ...)                          \
+  C10_EXPORT ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>*    \
+  RegistryName() {                                                            \
+    static ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>*      \
+        registry =                                                            \
+            new ::c10::Registry<SrcType, PtrType<ObjectType>, ##__VA_ARGS__>( \
+                false);                                                       \
+    return registry;                                                          \
+  }
+
+// Note(Yangqing): The __VA_ARGS__ below allows one to specify a templated
+// creator with comma in its templated arguments.
+#define C10_REGISTER_TYPED_CREATOR(RegistryName, key, ...)                  \
+  static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( \
+      key, RegistryName(), ##__VA_ARGS__);
+
+#define C10_REGISTER_TYPED_CREATOR_WITH_PRIORITY(                           \
+    RegistryName, key, priority, ...)                                       \
+  static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( \
+      key, priority, RegistryName(), ##__VA_ARGS__);
+
+#define C10_REGISTER_TYPED_CLASS(RegistryName, key, ...)                    \
+  static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( \
+      key,                                                                  \
+      RegistryName(),                                                       \
+      Registerer##RegistryName::DefaultCreator<__VA_ARGS__>,                \
+      ::c10::demangle_type<__VA_ARGS__>());
+
+#define C10_REGISTER_TYPED_CLASS_WITH_PRIORITY(                             \
+    RegistryName, key, priority, ...)                                       \
+  static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( \
+      key,                                                                  \
+      priority,                                                             \
+      RegistryName(),                                                       \
+      Registerer##RegistryName::DefaultCreator<__VA_ARGS__>,                \
+      ::c10::demangle_type<__VA_ARGS__>());
+
+// C10_DECLARE_REGISTRY and C10_DEFINE_REGISTRY are hard-wired to use
+// std::string as the key type, because that is the most common case.
+#define C10_DECLARE_REGISTRY(RegistryName, ObjectType, ...) \
+  C10_DECLARE_TYPED_REGISTRY(                               \
+      RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__)
+
+#define TORCH_DECLARE_REGISTRY(RegistryName, ObjectType, ...) \
+  TORCH_DECLARE_TYPED_REGISTRY(                               \
+      RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__)
+
+#define C10_DEFINE_REGISTRY(RegistryName, ObjectType, ...) \
+  C10_DEFINE_TYPED_REGISTRY(                               \
+      RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__)
+
+#define C10_DEFINE_REGISTRY_WITHOUT_WARNING(RegistryName, ObjectType, ...) \
+  C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING(                               \
+      RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__)
+
+#define C10_DECLARE_SHARED_REGISTRY(RegistryName, ObjectType, ...) \
+  C10_DECLARE_TYPED_REGISTRY(                                      \
+      RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__)
+
+#define TORCH_DECLARE_SHARED_REGISTRY(RegistryName, ObjectType, ...) \
+  TORCH_DECLARE_TYPED_REGISTRY(                                      \
+      RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__)
+
+#define C10_DEFINE_SHARED_REGISTRY(RegistryName, ObjectType, ...) \
+  C10_DEFINE_TYPED_REGISTRY(                                      \
+      RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__)
+
+#define C10_DEFINE_SHARED_REGISTRY_WITHOUT_WARNING( \
+    RegistryName, ObjectType, ...)                  \
+  C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING(        \
+      RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__)
+
+// C10_REGISTER_CREATOR and C10_REGISTER_CLASS are hard-wired to use
+// std::string as the key type, because that is the most common case.
+#define C10_REGISTER_CREATOR(RegistryName, key, ...) \
+  C10_REGISTER_TYPED_CREATOR(RegistryName, #key, __VA_ARGS__)
+
+#define C10_REGISTER_CREATOR_WITH_PRIORITY(RegistryName, key, priority, ...) \
+  C10_REGISTER_TYPED_CREATOR_WITH_PRIORITY(                                  \
+      RegistryName, #key, priority, __VA_ARGS__)
+
+#define C10_REGISTER_CLASS(RegistryName, key, ...) \
+  C10_REGISTER_TYPED_CLASS(RegistryName, #key, __VA_ARGS__)
+
+#define C10_REGISTER_CLASS_WITH_PRIORITY(RegistryName, key, priority, ...) \
+  C10_REGISTER_TYPED_CLASS_WITH_PRIORITY(                                  \
+      RegistryName, #key, priority, __VA_ARGS__)
+
+} // namespace c10
+
+#endif // C10_UTIL_REGISTRY_H_
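
A sketch of the intended macro flow (illustrative only; Widget and the registry name are made up, and DECLARE normally lives in a header while DEFINE lives in exactly one source file):

#include <c10/util/Registry.h>
#include <memory>
#include <string>

struct Widget {
  virtual ~Widget() = default;
};

// Declares the WidgetRegistry() accessor and the RegistererWidgetRegistry
// typedef.
C10_DECLARE_REGISTRY(WidgetRegistry, Widget);
// Defines the registry; keys are std::string, values std::unique_ptr<Widget>.
C10_DEFINE_REGISTRY(WidgetRegistry, Widget);

struct BlueWidget : Widget {};
// Registers BlueWidget under the key "BlueWidget" at static-init time.
C10_REGISTER_CLASS(WidgetRegistry, BlueWidget, BlueWidget);

std::unique_ptr<Widget> make_widget(const std::string& key) {
  // Create() returns nullptr for unknown keys rather than throwing.
  return WidgetRegistry()->Create(key);
}

Mixing DECLARE and DEFINE in one translation unit, as done here for brevity, triggers the benign dllimport/dllexport warning described in the header's comment.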
llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/SmallBuffer.h ADDED
@@ -0,0 +1,87 @@
+#pragma once
+#include <array>
+#include <cstddef>
+#include <cstdint>
+#include <type_traits>
+
+/** Helper class for allocating temporary fixed size arrays with SBO.
+ *
+ * This is intentionally much simpler than SmallVector, to improve performance
+ * at the expense of many features:
+ * - No zero-initialization for numeric types
+ * - No resizing after construction
+ * - No copy/move
+ * - No non-trivial types
+ */
+
+namespace c10 {
+
+template <typename T, size_t N>
+class SmallBuffer {
+  static_assert(std::is_trivial_v<T>, "SmallBuffer is intended for POD types");
+
+  std::array<T, N> storage_;
+  size_t size_{};
+  T* data_{};
+
+ public:
+  SmallBuffer(size_t size) : size_(size) {
+    if (size > N) {
+      data_ = new T[size];
+    } else {
+      data_ = &storage_[0];
+    }
+  }
+
+  SmallBuffer(const SmallBuffer&) = delete;
+  SmallBuffer& operator=(const SmallBuffer&) = delete;
+
+  // move constructor is needed in function return
+  SmallBuffer(SmallBuffer&& rhs) noexcept : size_{rhs.size_} {
+    rhs.size_ = 0;
+    if (size_ > N) {
+      data_ = rhs.data_;
+      rhs.data_ = nullptr;
+    } else {
+      storage_ = std::move(rhs.storage_);
+      data_ = &storage_[0];
+    }
+  }
+
+  SmallBuffer& operator=(SmallBuffer&&) = delete;
+
+  ~SmallBuffer() {
+    if (size_ > N) {
+      delete[] data_;
+    }
+  }
+  T& operator[](size_t idx) {
+    return data()[idx];
+  }
+  const T& operator[](size_t idx) const {
+    return data()[idx];
+  }
+  T* data() {
+    return data_;
+  }
+  const T* data() const {
+    return data_;
+  }
+  size_t size() const {
+    return size_;
+  }
+  T* begin() {
+    return data_;
+  }
+  const T* begin() const {
+    return data_;
+  }
+  T* end() {
+    return data_ + size_;
+  }
+  const T* end() const {
+    return data_ + size_;
+  }
+};
+
+} // namespace c10
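
A short usage sketch (illustrative, not part of the diff): sizes up to N live in the inline std::array; larger sizes fall back to a single heap allocation, and the destructor frees it:

#include <c10/util/SmallBuffer.h>
#include <cstddef>
#include <cstdint>

int64_t fill_and_sum(size_t n) {
  // Stack storage for n <= 64; one heap allocation otherwise.
  c10::SmallBuffer<int64_t, 64> buf(n);
  for (size_t i = 0; i != n; ++i) {
    buf[i] = static_cast<int64_t>(i);
  }
  int64_t total = 0;
  for (int64_t v : buf) {  // begin()/end() make it range-for friendly
    total += v;
  }
  return total;
}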