applied-ai-018 committed
Commit b8bf9b4 · verified · 1 Parent(s): e9b87ea

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete change set.

Files changed (50)
  1. ckpts/universal/global_step120/zero/13.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step120/zero/13.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  3. ckpts/universal/global_step120/zero/17.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  4. ckpts/universal/global_step120/zero/26.attention.dense.weight/fp32.pt +3 -0
  5. ckpts/universal/global_step120/zero/8.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  6. venv/lib/python3.10/site-packages/torch/_vendor/__init__.py +0 -0
  7. venv/lib/python3.10/site-packages/torch/_vendor/__pycache__/__init__.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/torch/_vendor/packaging/__init__.py +15 -0
  9. venv/lib/python3.10/site-packages/torch/_vendor/packaging/__pycache__/__init__.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/torch/_vendor/packaging/__pycache__/_structures.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/torch/_vendor/packaging/__pycache__/version.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/torch/_vendor/packaging/_structures.py +61 -0
  13. venv/lib/python3.10/site-packages/torch/_vendor/packaging/version.py +563 -0
  14. venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/__init__.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/api.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/sharder.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/torch/distributed/_shard/checkpoint/__init__.py +12 -0
  18. venv/lib/python3.10/site-packages/torch/distributed/_shard/checkpoint/__pycache__/__init__.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/__init__.py +54 -0
  20. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/__pycache__/__init__.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/__pycache__/api.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/api.py +97 -0
  23. venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py +202 -0
  24. venv/lib/python3.10/site-packages/torch/multiprocessing/__init__.py +78 -0
  25. venv/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/__init__.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/_atfork.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/pool.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/queue.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/reductions.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/spawn.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/torch/multiprocessing/_atfork.py +33 -0
  32. venv/lib/python3.10/site-packages/torch/multiprocessing/pool.py +52 -0
  33. venv/lib/python3.10/site-packages/torch/multiprocessing/queue.py +42 -0
  34. venv/lib/python3.10/site-packages/torch/multiprocessing/reductions.py +594 -0
  35. venv/lib/python3.10/site-packages/torch/multiprocessing/spawn.py +281 -0
  36. venv/lib/python3.10/site-packages/torch/optim/__pycache__/__init__.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/torch/optim/__pycache__/_functional.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/torch/optim/__pycache__/adadelta.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/torch/optim/__pycache__/adagrad.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/torch/optim/__pycache__/adam.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/torch/optim/__pycache__/adamax.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/torch/optim/__pycache__/adamw.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/torch/optim/__pycache__/asgd.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/torch/optim/__pycache__/lr_scheduler.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/torch/optim/__pycache__/nadam.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/torch/optim/__pycache__/optimizer.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/torch/optim/__pycache__/radam.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/torch/optim/__pycache__/rmsprop.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/torch/optim/__pycache__/rprop.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/torch/optim/__pycache__/sgd.cpython-310.pyc +0 -0
ckpts/universal/global_step120/zero/13.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c31d9b40756acce3bf0d12e9e70a5e8ad64cc44a33bc787332542a2ef8601a0e
+size 33555612
ckpts/universal/global_step120/zero/13.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd656b9bb2813f355456a55aa4531f6e8f59effe9dccb7ca4baf39aff3ffe6d5
+size 33555533
ckpts/universal/global_step120/zero/17.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eaf03990d83477ebd6470c35339921b14a43573e13bcd08c1a823ab90baf46da
+size 50332843
ckpts/universal/global_step120/zero/26.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:78960c792019510b98e38947fa89b9b3e531ce29b5cafd36654448d90fb09f49
+size 16778317
ckpts/universal/global_step120/zero/8.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:906f6a6b35a773c528f47bb4c8e717b66e082a9a7ca178615e4c1bfcdbb5714d
+size 33555533
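
The five .pt entries above are Git LFS pointer files, not the optimizer-state tensors themselves: each pointer records only the spec version, a sha256 object id, and the byte size of the blob that LFS stores out of band. A minimal sketch of reading such a pointer (the read_lfs_pointer helper is hypothetical, not part of this commit):

    # Sketch: parse a Git LFS pointer file into a dict of its key/value fields.
    def read_lfs_pointer(path):
        fields = {}
        with open(path) as f:
            for line in f:
                key, _, value = line.strip().partition(" ")
                fields[key] = value
        return fields

    # e.g. read_lfs_pointer(".../exp_avg.pt")
    # -> {'version': 'https://git-lfs.github.com/spec/v1', 'oid': 'sha256:c31d...', 'size': '33555612'}
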
venv/lib/python3.10/site-packages/torch/_vendor/__init__.py ADDED
Empty file (no content to display).
venv/lib/python3.10/site-packages/torch/_vendor/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (181 Bytes).
venv/lib/python3.10/site-packages/torch/_vendor/packaging/__init__.py ADDED
@@ -0,0 +1,15 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+__title__ = "packaging"
+__summary__ = "Core utilities for Python packages"
+__uri__ = "https://github.com/pypa/packaging"
+
+__version__ = "23.2"
+
+__author__ = "Donald Stufft and individual contributors"
+__email__ = "[email protected]"
+
+__license__ = "BSD-2-Clause or Apache-2.0"
+__copyright__ = "2014 %s" % __author__
venv/lib/python3.10/site-packages/torch/_vendor/packaging/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (521 Bytes).
venv/lib/python3.10/site-packages/torch/_vendor/packaging/__pycache__/_structures.cpython-310.pyc ADDED
Binary file (2.7 kB).
venv/lib/python3.10/site-packages/torch/_vendor/packaging/__pycache__/version.cpython-310.pyc ADDED
Binary file (14.2 kB).
venv/lib/python3.10/site-packages/torch/_vendor/packaging/_structures.py ADDED
@@ -0,0 +1,61 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+
+class InfinityType:
+    def __repr__(self) -> str:
+        return "Infinity"
+
+    def __hash__(self) -> int:
+        return hash(repr(self))
+
+    def __lt__(self, other: object) -> bool:
+        return False
+
+    def __le__(self, other: object) -> bool:
+        return False
+
+    def __eq__(self, other: object) -> bool:
+        return isinstance(other, self.__class__)
+
+    def __gt__(self, other: object) -> bool:
+        return True
+
+    def __ge__(self, other: object) -> bool:
+        return True
+
+    def __neg__(self: object) -> "NegativeInfinityType":
+        return NegativeInfinity
+
+
+Infinity = InfinityType()
+
+
+class NegativeInfinityType:
+    def __repr__(self) -> str:
+        return "-Infinity"
+
+    def __hash__(self) -> int:
+        return hash(repr(self))
+
+    def __lt__(self, other: object) -> bool:
+        return True
+
+    def __le__(self, other: object) -> bool:
+        return True
+
+    def __eq__(self, other: object) -> bool:
+        return isinstance(other, self.__class__)
+
+    def __gt__(self, other: object) -> bool:
+        return False
+
+    def __ge__(self, other: object) -> bool:
+        return False
+
+    def __neg__(self: object) -> InfinityType:
+        return Infinity
+
+
+NegativeInfinity = NegativeInfinityType()
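
_structures.py only defines two comparison sentinels; version.py below uses them to pad comparison keys so that missing pre/post/dev/local segments sort correctly. Roughly, they behave like this (illustrative snippet, assuming the vendored import path):

    from torch._vendor.packaging._structures import Infinity, NegativeInfinity

    assert Infinity > 10**9          # compares greater than anything
    assert NegativeInfinity < ""     # compares less than anything
    assert -Infinity is NegativeInfinity and -NegativeInfinity is Infinity
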
venv/lib/python3.10/site-packages/torch/_vendor/packaging/version.py ADDED
@@ -0,0 +1,563 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+"""
+.. testsetup::
+
+    from packaging.version import parse, Version
+"""
+
+import itertools
+import re
+from typing import Any, Callable, NamedTuple, Optional, SupportsInt, Tuple, Union
+
+from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
+
+__all__ = ["VERSION_PATTERN", "parse", "Version", "InvalidVersion"]
+
+LocalType = Tuple[Union[int, str], ...]
+
+CmpPrePostDevType = Union[InfinityType, NegativeInfinityType, Tuple[str, int]]
+CmpLocalType = Union[
+    NegativeInfinityType,
+    Tuple[Union[Tuple[int, str], Tuple[NegativeInfinityType, Union[int, str]]], ...],
+]
+CmpKey = Tuple[
+    int,
+    Tuple[int, ...],
+    CmpPrePostDevType,
+    CmpPrePostDevType,
+    CmpPrePostDevType,
+    CmpLocalType,
+]
+VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool]
+
+
+class _Version(NamedTuple):
+    epoch: int
+    release: Tuple[int, ...]
+    dev: Optional[Tuple[str, int]]
+    pre: Optional[Tuple[str, int]]
+    post: Optional[Tuple[str, int]]
+    local: Optional[LocalType]
+
+
+def parse(version: str) -> "Version":
+    """Parse the given version string.
+
+    >>> parse('1.0.dev1')
+    <Version('1.0.dev1')>
+
+    :param version: The version string to parse.
+    :raises InvalidVersion: When the version string is not a valid version.
+    """
+    return Version(version)
+
+
+class InvalidVersion(ValueError):
+    """Raised when a version string is not a valid version.
+
+    >>> Version("invalid")
+    Traceback (most recent call last):
+        ...
+    packaging.version.InvalidVersion: Invalid version: 'invalid'
+    """
+
+
+class _BaseVersion:
+    _key: Tuple[Any, ...]
+
+    def __hash__(self) -> int:
+        return hash(self._key)
+
+    # Please keep the duplicated `isinstance` check
+    # in the six comparisons hereunder
+    # unless you find a way to avoid adding overhead function calls.
+    def __lt__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key < other._key
+
+    def __le__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key <= other._key
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key == other._key
+
+    def __ge__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key >= other._key
+
+    def __gt__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key > other._key
+
+    def __ne__(self, other: object) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key != other._key
+
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+_VERSION_PATTERN = r"""
+    v?
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>alpha|a|beta|b|preview|pre|c|rc)
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+VERSION_PATTERN = _VERSION_PATTERN
+"""
+A string containing the regular expression used to match a valid version.
+
+The pattern is not anchored at either end, and is intended for embedding in larger
+expressions (for example, matching a version number as part of a file name). The
+regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``
+flags set.
+
+:meta hide-value:
+"""
+
+
+class Version(_BaseVersion):
+    """This class abstracts handling of a project's versions.
+
+    A :class:`Version` instance is comparison aware and can be compared and
+    sorted using the standard Python interfaces.
+
+    >>> v1 = Version("1.0a5")
+    >>> v2 = Version("1.0")
+    >>> v1
+    <Version('1.0a5')>
+    >>> v2
+    <Version('1.0')>
+    >>> v1 < v2
+    True
+    >>> v1 == v2
+    False
+    >>> v1 > v2
+    False
+    >>> v1 >= v2
+    False
+    >>> v1 <= v2
+    True
+    """
+
+    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+    _key: CmpKey
+
+    def __init__(self, version: str) -> None:
+        """Initialize a Version object.
+
+        :param version:
+            The string representation of a version which will be parsed and normalized
+            before use.
+        :raises InvalidVersion:
+            If the ``version`` does not conform to PEP 440 in any way then this
+            exception will be raised.
+        """
+
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion(f"Invalid version: '{version}'")
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+            post=_parse_letter_version(
+                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+            ),
+            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self) -> str:
+        """A representation of the Version that shows all internal state.
+
+        >>> Version('1.0.0')
+        <Version('1.0.0')>
+        """
+        return f"<Version('{self}')>"
+
+    def __str__(self) -> str:
+        """A string representation of the version that can be rounded-tripped.
+
+        >>> str(Version("1.0a5"))
+        '1.0a5'
+        """
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        # Pre-release
+        if self.pre is not None:
+            parts.append("".join(str(x) for x in self.pre))
+
+        # Post-release
+        if self.post is not None:
+            parts.append(f".post{self.post}")
+
+        # Development release
+        if self.dev is not None:
+            parts.append(f".dev{self.dev}")
+
+        # Local version segment
+        if self.local is not None:
+            parts.append(f"+{self.local}")
+
+        return "".join(parts)
+
+    @property
+    def epoch(self) -> int:
+        """The epoch of the version.
+
+        >>> Version("2.0.0").epoch
+        0
+        >>> Version("1!2.0.0").epoch
+        1
+        """
+        return self._version.epoch
+
+    @property
+    def release(self) -> Tuple[int, ...]:
+        """The components of the "release" segment of the version.
+
+        >>> Version("1.2.3").release
+        (1, 2, 3)
+        >>> Version("2.0.0").release
+        (2, 0, 0)
+        >>> Version("1!2.0.0.post0").release
+        (2, 0, 0)
+
+        Includes trailing zeroes but not the epoch or any pre-release / development /
+        post-release suffixes.
+        """
+        return self._version.release
+
+    @property
+    def pre(self) -> Optional[Tuple[str, int]]:
+        """The pre-release segment of the version.
+
+        >>> print(Version("1.2.3").pre)
+        None
+        >>> Version("1.2.3a1").pre
+        ('a', 1)
+        >>> Version("1.2.3b1").pre
+        ('b', 1)
+        >>> Version("1.2.3rc1").pre
+        ('rc', 1)
+        """
+        return self._version.pre
+
+    @property
+    def post(self) -> Optional[int]:
+        """The post-release number of the version.
+
+        >>> print(Version("1.2.3").post)
+        None
+        >>> Version("1.2.3.post1").post
+        1
+        """
+        return self._version.post[1] if self._version.post else None
+
+    @property
+    def dev(self) -> Optional[int]:
+        """The development number of the version.
+
+        >>> print(Version("1.2.3").dev)
+        None
+        >>> Version("1.2.3.dev1").dev
+        1
+        """
+        return self._version.dev[1] if self._version.dev else None
+
+    @property
+    def local(self) -> Optional[str]:
+        """The local version segment of the version.
+
+        >>> print(Version("1.2.3").local)
+        None
+        >>> Version("1.2.3+abc").local
+        'abc'
+        """
+        if self._version.local:
+            return ".".join(str(x) for x in self._version.local)
+        else:
+            return None
+
+    @property
+    def public(self) -> str:
+        """The public portion of the version.
+
+        >>> Version("1.2.3").public
+        '1.2.3'
+        >>> Version("1.2.3+abc").public
+        '1.2.3'
+        >>> Version("1.2.3+abc.dev1").public
+        '1.2.3'
+        """
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self) -> str:
+        """The "base version" of the version.
+
+        >>> Version("1.2.3").base_version
+        '1.2.3'
+        >>> Version("1.2.3+abc").base_version
+        '1.2.3'
+        >>> Version("1!1.2.3+abc.dev1").base_version
+        '1!1.2.3'
+
+        The "base version" is the public version of the project without any pre or post
+        release markers.
+        """
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        return "".join(parts)
+
+    @property
+    def is_prerelease(self) -> bool:
+        """Whether this version is a pre-release.
+
+        >>> Version("1.2.3").is_prerelease
+        False
+        >>> Version("1.2.3a1").is_prerelease
+        True
+        >>> Version("1.2.3b1").is_prerelease
+        True
+        >>> Version("1.2.3rc1").is_prerelease
+        True
+        >>> Version("1.2.3dev1").is_prerelease
+        True
+        """
+        return self.dev is not None or self.pre is not None
+
+    @property
+    def is_postrelease(self) -> bool:
+        """Whether this version is a post-release.
+
+        >>> Version("1.2.3").is_postrelease
+        False
+        >>> Version("1.2.3.post1").is_postrelease
+        True
+        """
+        return self.post is not None
+
+    @property
+    def is_devrelease(self) -> bool:
+        """Whether this version is a development release.
+
+        >>> Version("1.2.3").is_devrelease
+        False
+        >>> Version("1.2.3.dev1").is_devrelease
+        True
+        """
+        return self.dev is not None
+
+    @property
+    def major(self) -> int:
+        """The first item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").major
+        1
+        """
+        return self.release[0] if len(self.release) >= 1 else 0
+
+    @property
+    def minor(self) -> int:
+        """The second item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").minor
+        2
+        >>> Version("1").minor
+        0
+        """
+        return self.release[1] if len(self.release) >= 2 else 0
+
+    @property
+    def micro(self) -> int:
+        """The third item of :attr:`release` or ``0`` if unavailable.
+
+        >>> Version("1.2.3").micro
+        3
+        >>> Version("1").micro
+        0
+        """
+        return self.release[2] if len(self.release) >= 3 else 0
+
+
+def _parse_letter_version(
+    letter: Optional[str], number: Union[str, bytes, SupportsInt, None]
+) -> Optional[Tuple[str, int]]:
+
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume if we are given a number, but we are not given a letter
+        # then this is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+    return None
+
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local: Optional[str]) -> Optional[LocalType]:
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+    return None
+
+
+def _cmpkey(
+    epoch: int,
+    release: Tuple[int, ...],
+    pre: Optional[Tuple[str, int]],
+    post: Optional[Tuple[str, int]],
+    dev: Optional[Tuple[str, int]],
+    local: Optional[LocalType],
+) -> CmpKey:
+
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll use a reverse the list, drop all the now
+    # leading zeros until we come to something non zero, then take the rest
+    # re-reverse it back into the correct order and make it a tuple and use
+    # that for our sorting key.
+    _release = tuple(
+        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        _pre: CmpPrePostDevType = NegativeInfinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        _pre = Infinity
+    else:
+        _pre = pre
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        _post: CmpPrePostDevType = NegativeInfinity
+
+    else:
+        _post = post
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        _dev: CmpPrePostDevType = Infinity
+
+    else:
+        _dev = dev
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        _local: CmpLocalType = NegativeInfinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        _local = tuple(
+            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+        )
+
+    return epoch, _release, _pre, _post, _dev, _local
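
For reference, the vendored module behaves like upstream packaging.version; a short usage sketch (assuming the vendored import path shown in the file header):

    from torch._vendor.packaging.version import InvalidVersion, Version, parse

    v = parse("1.0.post2+cu118")
    assert v.release == (1, 0) and v.post == 2 and v.local == "cu118"
    assert v.public == "1.0.post2" and v.base_version == "1.0"

    # PEP 440 ordering: dev release < pre-release < final < post-release
    assert Version("1.0.dev1") < Version("1.0a1") < Version("1.0") < Version("1.0.post1")

    try:
        Version("not a version")
    except InvalidVersion:
        pass
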
venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (316 Bytes).
venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/api.cpython-310.pyc ADDED
Binary file (9.82 kB).
venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/sharder.cpython-310.pyc ADDED
Binary file (1.35 kB).
venv/lib/python3.10/site-packages/torch/distributed/_shard/checkpoint/__init__.py ADDED
@@ -0,0 +1,12 @@
+# Keep old package for BC purposes, this file should be removed once
+# everything moves to the `torch.distributed.checkpoint` package.
+import sys
+import torch
+import warnings
+
+from torch.distributed.checkpoint import *  # noqa: F403
+warnings.warn(
+    "torch.distributed._shard.checkpoint will be deprecated, use torch.distributed.checkpoint instead",
+    DeprecationWarning
+)
+sys.modules['torch.distributed._shard.checkpoint'] = torch.distributed.checkpoint
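
The shim keeps the old import path working: importing torch.distributed._shard.checkpoint emits a DeprecationWarning and, because of the sys.modules alias on the last line, should resolve to the same module object as torch.distributed.checkpoint. Illustrative check (assumes a torch build containing both modules):

    import torch.distributed._shard.checkpoint as old_ckpt   # warns: deprecated
    import torch.distributed.checkpoint as new_ckpt

    assert old_ckpt is new_ckpt   # both names point at the same module object
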
venv/lib/python3.10/site-packages/torch/distributed/_shard/checkpoint/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (531 Bytes).
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/__init__.py ADDED
@@ -0,0 +1,54 @@
+from typing import Iterator, Tuple, Union
+from .api import ShardedOptimizer
+
+import torch.nn as nn
+
+from torch.distributed._shard.sharded_tensor import (
+    ShardedTensor
+)
+
+def named_params_with_sharded_tensor(
+    module: nn.Module,
+    prefix: str = '',
+    recurse: bool = True,
+) -> Iterator[Tuple[str, Union[nn.Parameter, ShardedTensor]]]:
+
+    r"""Returns an iterator over module parameters (together with the
+    ShardedTensor parameters), yielding both the name of the parameter
+    as well as the parameter itself. This is typically passed to a
+    :class:`torch.distributed._shard.sharded_optim.ShardedOptimizer`
+
+    Args:
+        prefix (str): prefix to prepend to all parameter names.
+        recurse (bool): if True, then yields parameters of this module
+            and all submodules. Otherwise, yields only parameters that
+            are direct members of this module.
+
+    Yields:
+        (str, Union[Tensor, ShardedTensor]): Tuple containing
+            the name and parameter (or ShardedTensor parameter)
+
+    Example::
+
+        >>> # xdoctest: +SKIP
+        >>> model = torch.nn.Linear(*linear_size)
+        >>> shard_parameter(model, "weight", spec)
+        >>> for name, param in named_params_with_sharded_tensor(model):
+        >>>     if name in ['weight']:
+        >>>         print(param.size())
+
+    """
+    modules = module.named_modules(prefix=prefix) if recurse else [(prefix, module)]
+
+    memo = set()
+    for mod_prefix, mod in modules:
+        # find all sharded tensor params
+        for name, val in vars(mod).items():
+            if isinstance(val, ShardedTensor) and val not in memo:
+                memo.add(val)
+                name = mod_prefix + ('.' if mod_prefix else '') + name
+                yield name, val
+
+    # find all nn.Parameters
+    for name, val in module.named_parameters():
+        yield name, val
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2 kB).
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/__pycache__/api.cpython-310.pyc ADDED
Binary file (4.57 kB).
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_optim/api.py ADDED
@@ -0,0 +1,97 @@
+from typing import List, Union, Mapping, Dict, Any
+
+import torch.optim as optim
+from torch import Tensor
+from torch.distributed._shard.sharded_tensor import ShardedTensor
+
+
+class ShardedOptimizer(optim.Optimizer):
+    def __init__(
+        self,
+        named_params: Mapping[str, Union[Tensor, ShardedTensor]],
+        optimizer_class,
+        *optimizer_args,
+        **optimizer_kwargs
+    ):
+        """
+        ShardedOptimizer collects all tensors and local shard tensors of
+        ShardedTensor, then use these tensors as ``params`` for optimizers
+
+        Args:
+            named_params (Dict[str, Union[Tensor, ShardedTensor]]) : a Dict
+                of parameters, where key is the parameter key, value is either
+                Tensor or ShardedTensor parameter.
+            optimizer_class (torch.optim.Optimizer): the Optimizer to use
+                locally, i.e. torch.optim.SGD, torch.optim.Adagrad, etc.
+            *optimizer_args: the arguments to initialize the optimizer.
+            **optimizer_kwargs: the key-word arguments to initialize the optimizer.
+
+        """
+        tensors: List[Tensor] = []
+        for value in named_params.values():
+            if isinstance(value, ShardedTensor):
+                for local_shard in value.local_shards():
+                    tensors.append(local_shard.tensor)
+            else:
+                tensors.append(value)
+
+        self.named_params = named_params
+        self._optim = optimizer_class(tensors, *optimizer_args, **optimizer_kwargs)
+        self.param_groups = self._optim.param_groups
+        self.state = self._optim.state
+
+    def zero_grad(self, set_to_none: bool = True):  # type: ignore[override]
+        r"""Resets the gradients of all optimized :class:`torch.Tensor` s.
+
+        Args:
+            set_to_none (bool): instead of setting to zero, set the grads to None.
+                This will in general have lower memory footprint, and can modestly improve performance.
+                However, it changes certain behaviors. For example:
+                1. When the user tries to access a gradient and perform manual ops on it,
+                a None attribute or a Tensor full of 0s will behave differently.
+                2. If the user requests ``zero_grad(set_to_none=True)`` followed by a backward pass, ``.grad``\ s
+                are guaranteed to be None for params that did not receive a gradient.
+                3. ``torch.optim`` optimizers have a different behavior if the gradient is 0 or None
+                (in one case it does the step with a gradient of 0 and in the other it skips
+                the step altogether).
+        """
+        self._optim.zero_grad(set_to_none)
+
+    def step(self, closure=None):
+        r"""Performs a single optimization step (parameter update).
+
+        Args:
+            closure (Callable): A closure that reevaluates the model and
+                returns the loss. Optional for most optimizers.
+
+        .. note::
+            Unless otherwise specified, this function should not modify the
+            ``.grad`` field of the parameters.
+        """
+        self._optim.step(closure)
+
+    def state_dict(self) -> Dict[str, Any]:
+        """
+        Returned state and param_groups will contain parameter keys
+        instead of parameter indices like torch.optim.Optimizer.
+        This allows for advanced functionality like optimizer re-sharding to be implemented.
+        """
+        # TODO: implement state_dict
+        raise NotImplementedError("ShardedOptimizer state_dict not implemented yet!")
+
+
+    def load_state_dict(self, state_dict: Mapping[str, Any]):
+        r"""Loads the ShardedOptimizer state.
+
+        Args:
+            state_dict (dict): ShardedOptimizer state. Should be an object returned
+                from a call to :meth:`state_dict`.
+        """
+        # TODO: implement load_state_dict
+        raise NotImplementedError("ShardedOptimizer load_state_dict not implemented yet!")
+
+    def add_param_group(self, param_group: Any):
+        r"""Add a new param group
+        """
+        # TODO: implement add_param_group
+        raise NotImplementedError("ShardedOptimizer add_param_group not implemented yet!")
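
A minimal end-to-end sketch of how these two files are meant to be used together, assuming torch.distributed is already initialized and `spec` is a sharding spec (for example the ChunkShardingSpec defined in the next file) covering every rank:

    import torch
    import torch.optim as optim
    from torch.distributed._shard import shard_parameter
    from torch.distributed._shard.sharded_optim import (
        ShardedOptimizer,
        named_params_with_sharded_tensor,
    )

    model = torch.nn.Linear(1024, 1024).cuda()
    shard_parameter(model, "weight", spec)          # weight becomes a ShardedTensor

    # ShardedOptimizer hands local shards (plus regular Parameters) to plain SGD.
    sharded_optim = ShardedOptimizer(
        dict(named_params_with_sharded_tensor(model)),
        optim.SGD,
        lr=0.01,
    )
    model(torch.randn(8, 1024, device="cuda")).sum().backward()
    sharded_optim.step()
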
venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py ADDED
@@ -0,0 +1,202 @@
+from dataclasses import dataclass
+import torch
+import torch.distributed._shard.sharded_tensor.metadata as sharded_tensor_meta
+from torch.distributed._shard.metadata import ShardMetadata
+from torch.distributed._shard.sharded_tensor.shard import Shard
+from torch.distributed._shard.sharded_tensor.utils import (
+    _parse_and_validate_remote_device
+)
+from torch.distributed._shard._utils import narrow_tensor
+import torch.distributed as dist
+import torch.distributed.distributed_c10d as distributed_c10d
+from typing import List, Union, TYPE_CHECKING
+from ._internals import (
+    get_chunked_dim_size,
+    get_split_size,
+)
+
+from .api import ShardingSpec
+
+if TYPE_CHECKING:
+    # Only include ShardedTensor when do type checking, exclude it
+    # from run-time to resolve circular dependency.
+    from torch.distributed._shard.sharded_tensor import ShardedTensor
+
+@dataclass
+class ChunkShardingSpec(ShardingSpec):
+    """
+    This is a type of PlacementSpec that defines the placement as being sharded
+    across multiple devices. In particular, it represents sharding a Tensor
+    along a single dimension into equal chunks (similar to :meth:`torch.chunk`).
+
+    The semantics of how a tensor is partitioned is inline with
+    :meth:`torch.chunk`, where ``dim`` in torch.chunk corresponds to the
+    specified ``dim`` and ``chunks`` in torch.chunk is the number of elements
+    in the placement specified.
+
+    Args:
+        dim (int or str):
+            The dimension to shard on, could be an integer representing the
+            dimension or a string in case of named tensors where dimensions are
+            named. Note that named tensor support is not added yet.
+        placement(List[Union[_remote_device, str]]):
+            Specifies the placement of each shard of the Tensor. The size of
+            the list represents the number of shards to be created. This could
+            be a list of
+            :class:`torch.distributed._remote_device`'s. This list
+            could also contain a string which represents remote
+            device as accepted by
+            :class:`torch.distributed._remote_device`
+    """
+
+    ShardingDim = Union[int, str]
+
+    dim: ShardingDim
+    placements: List[Union[torch.distributed._remote_device, str]]
+
+    def __post_init__(self):
+        self._verify_dim(self.dim)
+        for i, remote_device in enumerate(self.placements):
+            if not isinstance(remote_device, torch.distributed._remote_device):
+                self.placements[i] = torch.distributed._remote_device(remote_device)
+
+    @staticmethod
+    def _verify_dim(dim):
+        # Validate the sharding spec.
+        # TODO: support named dimension
+        if isinstance(dim, str):
+            raise NotImplementedError(
+                "ChunkShardingSpec does not support named dimension yet!"
+            )
+
+        if not isinstance(dim, int):
+            raise ValueError(
+                f"Sharding dim needs to be an integer, found: {dim}"
+            )
+
+    def build_metadata(self,
+                       tensor_sizes: torch.Size,
+                       tensor_properties: sharded_tensor_meta.TensorProperties,
+                       ) -> sharded_tensor_meta.ShardedTensorMetadata:
+        tensor_num_dim = len(tensor_sizes)
+
+        self._verify_dim(self.dim)
+        if self.dim >= tensor_num_dim or self.dim < -tensor_num_dim:  # type: ignore[operator]
+            raise ValueError(f"Invalid sharding dim: {self.dim}")
+
+        shards_metadata = []
+        sharding_dim_size = tensor_sizes[self.dim]  # type: ignore[index]
+        chunks = len(self.placements)
+        split_size = get_split_size(sharding_dim_size, chunks)
+        for idx, placement in enumerate(self.placements):
+            # generate ShardMetadata for each placement device
+            chunked_dim_size = get_chunked_dim_size(sharding_dim_size, split_size, idx)
+            shard_size = list(tensor_sizes)
+            current_offsets = [0] * tensor_num_dim
+            current_offsets[self.dim] = split_size * idx  # type: ignore[index]
+            shard_size[self.dim] = chunked_dim_size  # type: ignore[index]
+
+            shard_metadata = ShardMetadata(
+                shard_offsets=current_offsets,
+                shard_sizes=shard_size,
+                placement=placement,
+            )
+            shards_metadata.append(shard_metadata)
+
+        return sharded_tensor_meta.ShardedTensorMetadata(
+            shards_metadata,
+            tensor_sizes,
+            tensor_properties
+        )
+
+
+    def shard(self, tensor: torch.Tensor, src_rank: int = 0, process_group=None) -> "ShardedTensor":
+        """
+        Args:
+            src_rank: group rank relative to ``process_group``
+
+            N.B. If ``process_group`` is None, ``src_rank`` is a global rank.
+        """
+        # relative imports to avoid circular dependency
+        from torch.distributed._shard.sharded_tensor import (
+            ShardedTensor
+        )
+        tensor_properties = sharded_tensor_meta.TensorProperties(
+            dtype=tensor.dtype,
+            layout=tensor.layout,
+            requires_grad=tensor.requires_grad,
+            memory_format=torch.contiguous_format,
+            pin_memory=tensor.is_pinned()
+        )
+        current_rank = dist.get_rank(process_group)
+        tensor_meta = self.build_metadata(tensor.size(), tensor_properties)
+        local_shards = []
+        local_tensor = None
+        local_metadata = None
+        tensors_to_scatter = [None] * dist.get_world_size(process_group)
+
+        sharding_dim_size = tensor.size()[self.dim]  # type: ignore[index]
+        chunks = len(self.placements)
+        split_size = get_split_size(sharding_dim_size, chunks)
+        scatter_shape = list(tensor.size())
+        scatter_shape[self.dim] = split_size  # type: ignore[index]
+
+        for shard_meta in tensor_meta.shards_metadata:
+            rank, device = _parse_and_validate_remote_device(process_group, shard_meta.placement)
+            if current_rank == src_rank:
+                # Reshape to get shard for this rank and we don't want autograd
+                # recording here for the narrow op and 'local_shard' should be a
+                # leaf variable in the autograd graph.
+                narrowed_tensor = narrow_tensor(tensor, shard_meta)
+                if shard_meta.shard_sizes[self.dim] < split_size:  # type: ignore[index]
+                    # for the last shard that might be smaller to other shards
+                    # resize the narrowed tensor to the same size and use it for
+                    # the scatter collective as dist.scatter requires same size
+                    # inputs on every rank
+                    tensor_to_scatter = narrowed_tensor.detach().clone().resize_(scatter_shape)
+                else:
+                    tensor_to_scatter = narrowed_tensor.detach().clone().contiguous()
+
+                tensors_to_scatter[rank] = tensor_to_scatter
+
+            if current_rank == rank:
+                local_tensor = torch.empty(
+                    scatter_shape, dtype=tensor.dtype, layout=tensor.layout, device=device)
+                local_metadata = shard_meta
+
+        # each rank should have local_tensor and local_metadata initialized if we build
+        # the metadata list in a correct way.
+        assert local_tensor is not None
+        assert local_metadata is not None
+
+        # Scatter the shards to all ranks in the pg
+        # scatter takes the global rank as ``src``
+        src_for_scatter = src_rank
+        if process_group is not None and process_group is not distributed_c10d._get_default_group():
+            src_for_scatter = distributed_c10d.get_global_rank(process_group, src_for_scatter)
+
+        dist.scatter(
+            local_tensor,
+            scatter_list=tensors_to_scatter if current_rank == src_rank else None,
+            src=src_for_scatter,
+            group=process_group
+        )
+
+        if list(local_tensor.size()) != local_metadata.shard_sizes:
+            # detach again after receiving to ensure local shards remain a leaf node
+            local_tensor = local_tensor.resize_(local_metadata.shard_sizes).detach()
+
+        # Sync requires_grad to local_shard.
+        local_tensor.requires_grad = tensor.requires_grad
+
+        local_shards.append(Shard(tensor=local_tensor, metadata=local_metadata))
+
+        st = ShardedTensor._init_from_local_shards_and_global_metadata(
+            local_shards,
+            tensor_meta,
+            process_group=process_group)
+
+        # Manually set sharding_spec
+        st._sharding_spec = self
+
+        return st
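
For context, a ChunkShardingSpec is typically built from "rank:<idx>/<device>" placement strings and then either passed to sharding helpers or used directly via shard(); a small sketch (assumes a 4-rank process group is already initialized):

    import torch
    from torch.distributed._shard.sharding_spec import ChunkShardingSpec

    # Split dim 0 into 4 equal chunks, one per rank/GPU.
    spec = ChunkShardingSpec(
        dim=0,
        placements=[
            "rank:0/cuda:0",
            "rank:1/cuda:1",
            "rank:2/cuda:2",
            "rank:3/cuda:3",
        ],
    )

    # On every rank (rank 0 holds the full tensor and scatters the shards):
    # st = spec.shard(full_tensor, src_rank=0)
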
venv/lib/python3.10/site-packages/torch/multiprocessing/__init__.py ADDED
@@ -0,0 +1,78 @@
+"""torch.multiprocessing is a wrapper around the native :mod:`multiprocessing` module.
+
+It registers custom reducers, that use shared memory to provide shared
+views on the same data in different processes. Once the tensor/storage is moved
+to shared_memory (see :func:`~torch.Tensor.share_memory_`), it will be possible
+to send it to other processes without making any copies.
+
+The API is 100% compatible with the original module - it's enough to change
+``import multiprocessing`` to ``import torch.multiprocessing`` to have all the
+tensors sent through the queues or shared via other mechanisms, moved to shared
+memory.
+
+Because of the similarity of APIs we do not document most of this package
+contents, and we recommend referring to very good docs of the original module.
+"""
+import multiprocessing
+import sys
+
+import torch
+from .reductions import init_reductions
+
+__all__ = ["set_sharing_strategy", "get_sharing_strategy", "get_all_sharing_strategies"]
+
+
+from multiprocessing import *  # noqa: F403
+
+
+__all__ += multiprocessing.__all__  # noqa: PLE0605 type: ignore[attr-defined]
+
+
+# This call adds a Linux specific prctl(2) wrapper function to this module.
+# See https://github.com/pytorch/pytorch/pull/14391 for more information.
+torch._C._multiprocessing_init()
+
+
+"""Add helper function to spawn N processes and wait for completion of any of
+them. This depends `mp.get_context` which was added in Python 3.4."""
+from .spawn import (
+    ProcessContext,
+    ProcessExitedException,
+    ProcessRaisedException,
+    spawn,
+    SpawnContext,
+    start_processes,
+)
+
+
+if sys.platform == "darwin" or sys.platform == "win32":
+    _sharing_strategy = "file_system"
+    _all_sharing_strategies = {"file_system"}
+else:
+    _sharing_strategy = "file_descriptor"
+    _all_sharing_strategies = {"file_descriptor", "file_system"}
+
+
+def set_sharing_strategy(new_strategy):
+    """Set the strategy for sharing CPU tensors.
+
+    Args:
+        new_strategy (str): Name of the selected strategy. Should be one of
+            the values returned by :func:`get_all_sharing_strategies()`.
+    """
+    global _sharing_strategy
+    assert new_strategy in _all_sharing_strategies
+    _sharing_strategy = new_strategy
+
+
+def get_sharing_strategy():
+    """Return the current strategy for sharing CPU tensors."""
+    return _sharing_strategy
+
+
+def get_all_sharing_strategies():
+    """Return a set of sharing strategies supported on a current system."""
+    return _all_sharing_strategies
+
+
+init_reductions()
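
A small self-contained sketch of the drop-in API this module exposes: pick a sharing strategy, move a tensor to shared memory, and fan work out with spawn (writes made by children are visible to the parent because the storage is shared rather than copied):

    import torch
    import torch.multiprocessing as mp

    def worker(rank, shared):
        shared[rank] = rank          # visible to the parent: storage is shared

    if __name__ == "__main__":
        if "file_system" in mp.get_all_sharing_strategies():
            mp.set_sharing_strategy("file_system")   # avoids fd exhaustion on Linux
        shared = torch.zeros(4).share_memory_()
        mp.spawn(worker, args=(shared,), nprocs=4, join=True)
        print(shared)                # tensor([0., 1., 2., 3.])
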
venv/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.24 kB).
venv/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/_atfork.cpython-310.pyc ADDED
Binary file (1.21 kB).
venv/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/pool.cpython-310.pyc ADDED
Binary file (1.86 kB).
venv/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/queue.cpython-310.pyc ADDED
Binary file (2.2 kB).
venv/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/reductions.cpython-310.pyc ADDED
Binary file (10.9 kB).
venv/lib/python3.10/site-packages/torch/multiprocessing/__pycache__/spawn.cpython-310.pyc ADDED
Binary file (8.42 kB).
venv/lib/python3.10/site-packages/torch/multiprocessing/_atfork.py ADDED
@@ -0,0 +1,33 @@
+import sys
+
+__all__ = ["register_after_fork"]
+
+if sys.platform == "win32":
+    import multiprocessing.util as _util
+
+    def _register(func):
+        def wrapper(arg):
+            func()
+
+        _util.register_after_fork(_register, wrapper)
+
+else:
+    import os
+
+    def _register(func):
+        os.register_at_fork(after_in_child=func)
+
+
+def register_after_fork(func):
+    """Register a callable to be executed in the child process after a fork.
+
+    Note:
+        In python < 3.7 this will only work with processes created using the
+        ``multiprocessing`` module. In python >= 3.7 it also works with
+        ``os.fork()``.
+
+    Args:
+        func (function): Function taking no arguments to be called in the child after fork
+
+    """
+    _register(func)
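
Typical use is registering per-process cleanup or re-initialization that must run in a forked child; for example (illustrative, not from this commit):

    import os
    import torch
    from torch.multiprocessing._atfork import register_after_fork

    # Give every forked child its own RNG stream instead of a copy of the parent's.
    register_after_fork(lambda: torch.manual_seed(os.getpid()))
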
venv/lib/python3.10/site-packages/torch/multiprocessing/pool.py ADDED
@@ -0,0 +1,52 @@
+import multiprocessing.pool
+import multiprocessing.util as util
+
+from .queue import SimpleQueue
+
+
+def clean_worker(*args, **kwargs):
+    import gc
+
+    multiprocessing.pool.worker(*args, **kwargs)
+    # Regular multiprocessing workers don't fully clean up after themselves,
+    # so we have to explicitly trigger garbage collection to make sure that all
+    # destructors are called...
+    gc.collect()
+
+
+class Pool(multiprocessing.pool.Pool):
+    """Pool implementation which uses our version of SimpleQueue.
+
+    This lets us pass tensors in shared memory across processes instead of
+    serializing the underlying data.
+    """
+
+    def _setup_queues(self):
+        self._inqueue = SimpleQueue()
+        self._outqueue = SimpleQueue()
+        self._quick_put = self._inqueue._writer.send
+        self._quick_get = self._outqueue._reader.recv
+
+    def _repopulate_pool(self):
+        """Increase the number of pool processes to the specified number.
+
+        Bring the number of pool processes up to the specified number, for use after
+        reaping workers which have exited.
+        """
+        for i in range(self._processes - len(self._pool)):
+            # changed worker -> clean_worker
+            args = (
+                self._inqueue,
+                self._outqueue,
+                self._initializer,
+                self._initargs,
+                self._maxtasksperchild,
+            )
+            if hasattr(self, "_wrap_exception"):
+                args += (self._wrap_exception,)
+            w = self.Process(target=clean_worker, args=args)
+            self._pool.append(w)
+            w.name = w.name.replace("Process", "PoolWorker")
+            w.daemon = True
+            w.start()
+            util.debug("added worker")
venv/lib/python3.10/site-packages/torch/multiprocessing/queue.py ADDED
@@ -0,0 +1,42 @@
+import io
+import multiprocessing.queues
+import pickle
+from multiprocessing.reduction import ForkingPickler
+
+
+class ConnectionWrapper:
+    """Proxy class for _multiprocessing.Connection which uses ForkingPickler for object serialization."""
+
+    def __init__(self, conn):
+        self.conn = conn
+
+    def send(self, obj):
+        buf = io.BytesIO()
+        ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(obj)
+        self.send_bytes(buf.getvalue())
+
+    def recv(self):
+        buf = self.recv_bytes()
+        return pickle.loads(buf)
+
+    def __getattr__(self, name):
+        if "conn" in self.__dict__:
+            return getattr(self.conn, name)
+        raise AttributeError(f"'{type(self).__name__}' object has no attribute 'conn'")
+
+
+class Queue(multiprocessing.queues.Queue):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._reader: ConnectionWrapper = ConnectionWrapper(self._reader)
+        self._writer: ConnectionWrapper = ConnectionWrapper(self._writer)
+        self._send = self._writer.send
+        self._recv = self._reader.recv
+
+
+class SimpleQueue(multiprocessing.queues.SimpleQueue):
+    def _make_methods(self):
+        if not isinstance(self._reader, ConnectionWrapper):
+            self._reader: ConnectionWrapper = ConnectionWrapper(self._reader)
+            self._writer: ConnectionWrapper = ConnectionWrapper(self._writer)
+        super()._make_methods()  # type: ignore[misc]
venv/lib/python3.10/site-packages/torch/multiprocessing/reductions.py ADDED
@@ -0,0 +1,594 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import multiprocessing
2
+ import os
3
+ import threading
4
+ from multiprocessing.reduction import ForkingPickler
5
+ from multiprocessing.util import register_after_fork
6
+ from typing import Union
7
+
8
+ import torch
9
+ import torch.utils.hooks
10
+ from torch._namedtensor_internals import check_serializing_named_tensor
11
+
12
+ try:
13
+ # Early load resource_sharer to prevent a partially initialized instance
14
+ # from being inherited in a forked child process. The reduce_storage method
15
+ # requires this module indirectly through DupFd(). The built-in mp.Queue
16
+ # class pickles arguments in a background thread which may overlap with the
17
+ # fork.
18
+ import multiprocessing.resource_sharer
19
+ except ImportError:
20
+ pass
21
+
22
+
23
+ class StorageWeakRef:
24
+ r"""A weak reference to a Storage.
25
+
26
+ The cdata member is a Python number containing the integer representation of
27
+ the Storage pointer.
28
+ """
29
+
30
+ __slots__ = ["cdata", "_free_weak_ref"]
31
+
32
+ def __init__(self, storage):
33
+ self.cdata = storage._weak_ref()
34
+ # Save a direct reference to _free_weak_ref because the `torch` module
35
+ # might be cleared during Python shutdown before this module is cleared.
36
+ self._free_weak_ref = torch.Storage._free_weak_ref # type: ignore[attr-defined]
37
+
38
+ @classmethod
39
+ def from_weakref(cls, cdata):
40
+ instance = cls.__new__(cls)
41
+ instance.cdata = cdata
42
+ instance._free_weak_ref = torch.Storage._free_weak_ref # type: ignore[attr-defined]
43
+ return instance
44
+
45
+ def expired(self):
46
+ return torch.Storage._expired(self.cdata) # type: ignore[attr-defined]
47
+
48
+ def __del__(self):
49
+ self._free_weak_ref(self.cdata)
50
+
51
+ def __hash__(self):
52
+ return self.cdata
53
+
54
+ def __eq__(self, other):
55
+ if id(self) == id(other):
56
+ return True
57
+ return self.cdata == other.cdata
58
+
59
+
60
+ class SharedCache(dict):
61
+ """Dictionary from multiprocessing handles to StorageWeakRef."""
62
+
63
+ def __init__(self):
64
+ # free_dead_references() is called if the len exceeds the current
65
+ # limit. The limit scales with the number of remaining live objects.
66
+ self.limit = 128
67
+ # `fork` inherits lock state, so in case we fork when the lock is held,
68
+ # we register a function to reset the lock to a new object to avoid
69
+ # possible deadlocks, following python multiprocessing library design.
70
+ self._after_fork()
71
+ register_after_fork(self, SharedCache._after_fork)
72
+
73
+ def _after_fork(self):
74
+ self.lock = threading.Lock()
75
+
76
+ def get(self, key):
77
+ with self.lock:
78
+ return dict.get(self, key)
79
+
80
+ def __setitem__(self, key, storage_ref):
81
+ with self.lock:
82
+ dict.__setitem__(self, key, storage_ref)
83
+ if len(self) > self.limit:
84
+ self.free_dead_references()
85
+
86
+ def free_dead_references(self):
87
+ live = 0
88
+ for key, storage_ref in list(self.items()):
89
+ if storage_ref.expired():
90
+ del self[key]
91
+ else:
92
+ live += 1
93
+ self.limit = max(128, live * 2)
94
+
95
+
96
+ # mapping from handles to StorageWeakRef objects
97
+ shared_cache = SharedCache()
98
+
99
+
100
+ def rebuild_event(device, handle):
101
+ return torch.cuda.Event.from_ipc_handle(device, handle)
102
+
103
+
104
+ def reduce_event(event):
105
+ handle = event.ipc_handle()
106
+ return (rebuild_event, (event.device, handle))
107
+
108
+
109
+ def rebuild_tensor(cls, storage, metadata):
110
+ storage_offset, size, stride, requires_grad = metadata
111
+ t = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
112
+ if cls == torch.nn.parameter.Parameter:
113
+ # we have to pass requires_grad into constructor, rather than set it as an
114
+ # attribute later, because it's an important check for Integer Tensors to
115
+ # have requires_grad=False (or else they raise an error)
116
+ t = torch.nn.parameter.Parameter(t, requires_grad=requires_grad)
117
+ else:
118
+ t.requires_grad = requires_grad
119
+ return t
120
+
121
+
122
+ def rebuild_cuda_tensor(
123
+ tensor_cls,
124
+ tensor_size,
125
+ tensor_stride,
126
+ tensor_offset,
127
+ storage_cls,
128
+ dtype,
129
+ storage_device,
130
+ storage_handle,
131
+ storage_size_bytes,
132
+ storage_offset_bytes,
133
+ requires_grad,
134
+ ref_counter_handle,
135
+ ref_counter_offset,
136
+ event_handle,
137
+ event_sync_required,
138
+ ):
139
+ # If storage_handle is None, storage points to nullptr.
140
+ if storage_handle is None or storage_size_bytes == 0:
141
+ storage = storage_cls(0, dtype=dtype, device=storage_device, _internal=True)
142
+ else:
143
+ storage = storage_from_cache(
144
+ storage_cls, (storage_handle, storage_offset_bytes)
145
+ )
146
+ if storage is None:
147
+ torch.cuda._lazy_init()
148
+ storage = storage_cls._new_shared_cuda(
149
+ storage_device,
150
+ storage_handle,
151
+ storage_size_bytes,
152
+ storage_offset_bytes,
153
+ ref_counter_handle,
154
+ ref_counter_offset,
155
+ event_handle,
156
+ event_sync_required,
157
+ )
158
+ shared_cache[(storage_handle, storage_offset_bytes)] = StorageWeakRef(
159
+ storage
160
+ )
161
+ else:
162
+ # We are already ref-counting this Storage, but the producer needs the new ref-counters to be released.
163
+ storage_cls._release_ipc_counter(
164
+ ref_counter_handle, ref_counter_offset, device=storage_device
165
+ )
166
+
167
+ _storage = (
168
+ storage
169
+ if isinstance(storage, torch.UntypedStorage)
170
+ else storage._untyped_storage
171
+ )
172
+
173
+ t = torch._utils._rebuild_tensor(
174
+ torch.storage.TypedStorage(wrap_storage=_storage, dtype=dtype, _internal=True),
175
+ tensor_offset,
176
+ tensor_size,
177
+ tensor_stride,
178
+ )
179
+
180
+ if tensor_cls == torch.nn.parameter.Parameter:
181
+ # It is crucial for integer tensors to receive
182
+ # the requires_grad=False as an argument in the constructor
183
+ t = torch.nn.parameter.Parameter(t, requires_grad=requires_grad)
184
+ else:
185
+ t.requires_grad = requires_grad
186
+
187
+ return t
188
+
189
+
190
+ def reduce_tensor(tensor):
191
+ if tensor.requires_grad and not tensor.is_leaf:
192
+ raise RuntimeError(
193
+ "Cowardly refusing to serialize non-leaf tensor which requires_grad, "
194
+ "since autograd does not support crossing process boundaries. "
195
+ "If you just want to transfer the data, call detach() on the tensor "
196
+ "before serializing (e.g., putting it on the queue)."
197
+ )
198
+
199
+ check_serializing_named_tensor(tensor)
200
+ torch.utils.hooks.warn_if_has_hooks(tensor)
201
+
202
+ # Note [CUDA IPC and the caching allocator]
203
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
204
+ # When you send a CUDA tensor over IPC, you might expect that you will
205
+ # get out the same storage from the other end. However, the CUDA caching
206
+ # allocator makes it difficult to preserve this invariant. Consider
207
+ # the following situation: a tensor of size 0x40 points to offset 0x20 of
208
+ # a storage at 0xA100 of size 0x100. (For simplicity, all of these
209
+ # sizes are given in bytes). HOWEVER, with the caching allocator, this storage
210
+ # might be part of a larger cudaMalloc allocation 0xA000 of size 0x4000.
211
+ #
212
+ # When we want to send this CUDA tensor over IPC, we must send the
213
+ # *entire* cudaMalloc allocation, i.e., the 0xA000 region, not just
214
+ # the storage 0xA100 (because that is what CUDA supports). So, on the
215
+ # other end, there simply isn't any way to say, "Wait, you gave me
216
+ # a bigger region (0xA000) than the one I wanted (0xA100)".
217
+ #
218
+ # OK, so if you sent the cudaMalloc allocation, can you just wrap that up as
219
+ # one storage itself? No, because this cudaMalloc allocation might contain
220
+ # storages of mixed types: float, bytes, double... If you make the entire
221
+ # allocation a single storage of type A, we'll hit an error when constructing
222
+ # a tensor of type B on the storage.
223
+ #
224
+ # cudaIpcMemHandle is an identifier to access the sender cudaMalloc allocation on the
225
+ # receiver side. However, cudaIpcMemHandles from each device in a given process may
226
+ # only be opened by one context per device per other process.
227
+ # If we open and close a memory handle multiple times in a process, CUDA is allowed
228
+ # to give it a different address; similarly, once we close the memory, we're not
229
+ # allowed to access it (and the storage/tensor built on top of it), even if it is
230
+ # still live in the original process. As we cannot make a cudaMalloc allocation
231
+ # to a single storage in one go, this requires us to cache the device pointer for
232
+ # each cudaIpcMemHandle on the C++ side to reconstruct the storages, while keeping
233
+ # the old ones alive.
234
+ # See [https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__DEVICE.html]
235
+ #
236
+ # This is fine, because all we need to do is to save our position in the allocation,
237
+ # and reconstruct storage and tensor from it.
238
+ # 0xA000 -> -------CUDA Allocation------
239
+ # | |
240
+ # | |
241
+ # | |
242
+ # | |
243
+ # 0xA100 -> --------storage1 begin------
244
+ # | |
245
+ # 0xA120 -> --------tensor1 begin ------
246
+ # | |
247
+ # | |
248
+ # | |
249
+ # | |
250
+ # | |
251
+ # 0xA160 -> --------tensor1 end---------
252
+ # | |
253
+ # | |
254
+ # | |
255
+ # 0xA200 -> --------storage1 end--------
256
+ # | |
257
+ # 0xE000 -> --------CUDA allocation-----
258
+ #
259
+ # To send tensor1, the following info is required from sender to receiver for
260
+ # storage reconstruction.
261
+ # 1. cudaIpcMemHandle of 0xA000(which can be mapped to a basePtr in receiver process).
262
+ # basePtr may not be exactly 0xA000 since it's a different process.
263
+ # 2. offset(0xA100) of storage1 in the CUDA allocation.
264
+ # 3. size of storage1(0x100).
265
+ #
266
+ # On the receiver side:
267
+ # 1. Get the devPtr of the MemHandle to access the memory, reconstruct a storage
268
+ # of the same type using (basePtr, offset, size).
269
+ # 2. We can reconstruct the tensor on top of the reconstructed storage:
270
+ # Tensor(size=0x040, offset=0x020, storage=Storage(data=basePtr+0xA100, size=0x0100))
271
+ #
272
+ # This strategy has a few implications:
273
+ #
274
+ # 1. When we serialize a CUDA tensor for IPC, we cannot do it all in one
275
+ # go (non-compositionally), and this requires us to have a global map
276
+ # memHandle -> devPtr for each process.
277
+ #
278
+ # 2. We MUST NOT let the new IPC tensor be resizable. Originally, a resize
279
+ # of the storage beyond 0x100 would merely have caused us to do a
280
+ # reallocation. You don't really want to do this, but if you did,
281
+ # all that would happen is that you would lose IPC sharing. But if
282
+ # you do this in the new world, we will happily let you write out of
283
+ # bounds of your "allocation", clobbering unrelated data in the cached
284
+ # allocator block. BAD!
285
+ #
286
+ # By the way, in old versions of PyTorch, we supported this situation
287
+ # natively using a "storage view", which permitted multiple storages to be
288
+ # views on each other. But this was the *only* use of storage views, so we
289
+ # eliminated it so that we could just use tensor views to implement the same
290
+ # thing.
291
+ #
292
+
293
+ # TODO: Handle distinguishing between subclass and non-subclass versions of NT better
294
+ # https://github.com/pytorch/pytorch/issues/110543
295
+ from torch.nested._internal.nested_tensor import NestedTensor
296
+
297
+ if tensor.is_nested and not isinstance(tensor, NestedTensor):
298
+ return reduce_nested_tensor(tensor)
299
+
300
+ if tensor.layout in {
301
+ torch.sparse_coo,
302
+ torch.sparse_csr,
303
+ torch.sparse_bsr,
304
+ torch.sparse_csc,
305
+ torch.sparse_bsc,
306
+ }:
307
+ return reduce_sparse_tensor(tensor)
308
+
309
+ storage = tensor._typed_storage()
310
+
311
+ if storage._untyped_storage.device.type == "cuda":
312
+ (
313
+ device,
314
+ handle,
315
+ storage_size_bytes,
316
+ storage_offset_bytes,
317
+ ref_counter_handle,
318
+ ref_counter_offset,
319
+ event_handle,
320
+ event_sync_required,
321
+ ) = storage._share_cuda_()
322
+ tensor_offset = tensor.storage_offset()
323
+ shared_cache[handle] = StorageWeakRef(storage)
324
+ # _backward_hooks purposely omitted here, see
325
+ # Note [Don't serialize hooks]
326
+ return (
327
+ rebuild_cuda_tensor,
328
+ (
329
+ type(tensor),
330
+ tensor.size(),
331
+ tensor.stride(),
332
+ tensor_offset, # tensor offset in its storage
333
+ type(storage),
334
+ tensor.dtype,
335
+ device,
336
+ handle, # identifier which CUDA allocation is the storage in.
337
+ storage_size_bytes, # size(in bytes) of the storage
338
+ storage_offset_bytes, # offset(in bytes) of the storage in the CUDA allocation
339
+ tensor.requires_grad,
340
+ ref_counter_handle,
341
+ ref_counter_offset,
342
+ event_handle,
343
+ event_sync_required,
344
+ ),
345
+ )
346
+
347
+ # _backward_hooks purposely omitted here, see Note [Don't serialize hooks]
348
+ metadata = (
349
+ tensor.storage_offset(),
350
+ tensor.size(),
351
+ tensor.stride(),
352
+ tensor.requires_grad,
353
+ )
354
+ return (rebuild_tensor, (type(tensor), storage, metadata))
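+
+ # Illustrative sketch (not part of the upstream file): the reducers above are what
+ # make the usual queue-based sharing pattern work. Assuming a CUDA device is
+ # available and `consumer` is defined at module top level, putting a CUDA tensor on
+ # a torch.multiprocessing queue serializes it through reduce_tensor() on the
+ # producer side and rebuilds it through rebuild_cuda_tensor() on the consumer side:
+ #
+ #     import torch
+ #     import torch.multiprocessing as mp
+ #
+ #     def consumer(q):
+ #         t = q.get()  # rebuilt here from the shared IPC handle
+ #         print(t.device, t.sum().item())
+ #
+ #     if __name__ == "__main__":
+ #         mp.set_start_method("spawn")
+ #         q = mp.Queue()
+ #         p = mp.Process(target=consumer, args=(q,))
+ #         p.start()
+ #         # leaf tensor; a non-leaf tensor that requires grad must be detach()ed first
+ #         q.put(torch.ones(4, device="cuda"))
+ #         p.join()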
355
+
356
+
357
+ def rebuild_nested_tensor(
358
+ rebuild_buffer_func,
359
+ rebuild_buffer_args,
360
+ rebuild_sizes_func,
361
+ rebuild_sizes_args,
362
+ rebuild_strides_func,
363
+ rebuild_strides_args,
364
+ rebuild_offsets_func,
365
+ rebuild_offsets_args,
366
+ ):
367
+ buffer = rebuild_buffer_func(*rebuild_buffer_args)
368
+ sizes = rebuild_sizes_func(*rebuild_sizes_args)
369
+ strides = rebuild_strides_func(*rebuild_strides_args)
370
+ offsets = rebuild_offsets_func(*rebuild_offsets_args)
371
+ return torch._nested_view_from_buffer_copy(buffer, sizes, strides, offsets)
372
+
373
+
374
+ def reduce_nested_tensor(nt):
375
+ rebuild_buffer_func, rebuild_buffer_args = reduce_tensor(nt.values())
376
+ rebuild_sizes_func, rebuild_sizes_args = reduce_tensor(nt._nested_tensor_size())
377
+ rebuild_strides_func, rebuild_strides_args = reduce_tensor(
378
+ nt._nested_tensor_strides()
379
+ )
380
+ rebuild_offsets_func, rebuild_offsets_args = reduce_tensor(
381
+ nt._nested_tensor_storage_offsets()
382
+ )
383
+
384
+ return (
385
+ rebuild_nested_tensor,
386
+ (
387
+ rebuild_buffer_func,
388
+ rebuild_buffer_args,
389
+ rebuild_sizes_func,
390
+ rebuild_sizes_args,
391
+ rebuild_strides_func,
392
+ rebuild_strides_args,
393
+ rebuild_offsets_func,
394
+ rebuild_offsets_args,
395
+ ),
396
+ )
397
+
398
+
399
+ def rebuild_sparse_coo_tensor(
400
+ rebuild_indices_func,
401
+ rebuild_indices_args,
402
+ rebuild_values_func,
403
+ rebuild_values_args,
404
+ shape,
405
+ is_coalesced,
406
+ ):
407
+ indices = rebuild_indices_func(*rebuild_indices_args)
408
+ values = rebuild_values_func(*rebuild_values_args)
409
+ return torch.sparse_coo_tensor(indices, values, shape, is_coalesced=is_coalesced)
410
+
411
+
412
+ def rebuild_sparse_compressed_tensor(
413
+ rebuild_compressed_indices_func,
414
+ rebuild_compressed_indices_args,
415
+ rebuild_plain_indices_func,
416
+ rebuild_plain_indices_args,
417
+ rebuild_values_func,
418
+ rebuild_values_args,
419
+ shape,
420
+ layout,
421
+ ):
422
+ compressed_indices = rebuild_compressed_indices_func(
423
+ *rebuild_compressed_indices_args
424
+ )
425
+ plain_indices = rebuild_plain_indices_func(*rebuild_plain_indices_args)
426
+ values = rebuild_values_func(*rebuild_values_args)
427
+ return torch.sparse_compressed_tensor(
428
+ compressed_indices, plain_indices, values, shape, layout=layout
429
+ )
430
+
431
+
432
+ def reduce_sparse_tensor(sparse):
433
+ if sparse.layout is torch.sparse_coo:
434
+ rebuild_indices_func, rebuild_indices_args = reduce_tensor(sparse._indices())
435
+ rebuild_values_func, rebuild_values_args = reduce_tensor(sparse._values())
436
+ return (
437
+ rebuild_sparse_coo_tensor,
438
+ (
439
+ rebuild_indices_func,
440
+ rebuild_indices_args,
441
+ rebuild_values_func,
442
+ rebuild_values_args,
443
+ sparse.shape,
444
+ sparse.is_coalesced(),
445
+ ),
446
+ )
447
+ else:
448
+ if sparse.layout in {torch.sparse_csr, torch.sparse_bsr}:
449
+ compressed_indices = sparse.crow_indices()
450
+ plain_indices = sparse.col_indices()
451
+ elif sparse.layout in {torch.sparse_csc, torch.sparse_bsc}:
452
+ compressed_indices = sparse.ccol_indices()
453
+ plain_indices = sparse.row_indices()
454
+ else:
455
+ raise NotImplementedError(sparse.layout)
456
+ (
457
+ rebuild_compressed_indices_func,
458
+ rebuild_compressed_indices_args,
459
+ ) = reduce_tensor(compressed_indices)
460
+ rebuild_plain_indices_func, rebuild_plain_indices_args = reduce_tensor(
461
+ plain_indices
462
+ )
463
+ rebuild_values_func, rebuild_values_args = reduce_tensor(sparse.values())
464
+ return (
465
+ rebuild_sparse_compressed_tensor,
466
+ (
467
+ rebuild_compressed_indices_func,
468
+ rebuild_compressed_indices_args,
469
+ rebuild_plain_indices_func,
470
+ rebuild_plain_indices_args,
471
+ rebuild_values_func,
472
+ rebuild_values_args,
473
+ sparse.shape,
474
+ sparse.layout,
475
+ ),
476
+ )
477
+
478
+
479
+ def fd_id(fd):
480
+ # Returns a tuple which uniquely identifies a file descriptor. On macOS,
481
+ # this doesn't work with shared memory handles, which is why we don't
482
+ # support the "file_descriptor" sharing method on that platform.
483
+ stat = os.fstat(fd)
484
+ return (stat.st_ino, stat.st_dev)
485
+
486
+
487
+ def storage_from_cache(cls, key):
488
+ storage_ref = shared_cache.get(key)
489
+ if storage_ref is None:
490
+ return None
491
+ return torch.UntypedStorage._new_with_weak_ptr(storage_ref.cdata)
492
+
493
+
494
+ def rebuild_storage_fd(cls, df, size):
495
+ fd = df.detach()
496
+ try:
497
+ storage = storage_from_cache(cls, fd_id(fd))
498
+ if storage is not None:
499
+ return storage
500
+ storage = cls._new_shared_fd_cpu(fd, size)
501
+ shared_cache[fd_id(fd)] = StorageWeakRef(storage)
502
+ return storage
503
+ finally:
504
+ os.close(fd)
505
+
506
+
507
+ def rebuild_storage_filename(cls, manager, handle, size, dtype=None):
508
+ storage: Union[torch.TypedStorage, torch.UntypedStorage] = storage_from_cache(
509
+ cls, handle
510
+ )
511
+ if storage is not None:
512
+ return storage._shared_decref()
513
+ if dtype is None:
514
+ storage = torch.UntypedStorage._new_shared_filename_cpu(manager, handle, size)
515
+ else:
516
+ byte_size = size * torch._utils._element_size(dtype)
517
+ untyped_storage: torch.UntypedStorage = (
518
+ torch.UntypedStorage._new_shared_filename_cpu(manager, handle, byte_size)
519
+ )
520
+ storage = torch.TypedStorage(
521
+ wrap_storage=untyped_storage, dtype=dtype, _internal=True
522
+ )
523
+ shared_cache[handle] = StorageWeakRef(storage)
524
+ return storage._shared_decref()
525
+
526
+
527
+ def rebuild_storage_empty(cls):
528
+ return cls()
529
+
530
+
531
+ def rebuild_typed_storage(storage, dtype):
532
+ return torch.storage.TypedStorage(wrap_storage=storage, dtype=dtype, _internal=True)
533
+
534
+
535
+ # Use for torch.storage.TypedStorage
536
+ def reduce_typed_storage(storage):
537
+ return (rebuild_typed_storage, (storage._untyped_storage, storage.dtype))
538
+
539
+
540
+ def rebuild_typed_storage_child(storage, storage_type):
541
+ return storage_type(wrap_storage=storage, _internal=True)
542
+
543
+
544
+ # Use for child classes of torch.storage.TypedStorage, like torch.FloatStorage
545
+ def reduce_typed_storage_child(storage):
546
+ return (rebuild_typed_storage_child, (storage._untyped_storage, type(storage)))
547
+
548
+
549
+ def reduce_storage(storage):
550
+ from . import get_sharing_strategy
551
+
552
+ if storage.is_cuda:
553
+ raise RuntimeError(
554
+ "Cannot pickle CUDA storage; try pickling a CUDA tensor instead"
555
+ )
556
+ elif get_sharing_strategy() == "file_system":
557
+ metadata = storage._share_filename_cpu_()
558
+ cache_key = metadata[1]
559
+ rebuild = rebuild_storage_filename
560
+ if isinstance(storage, torch.TypedStorage):
561
+ metadata += (storage.dtype,)
562
+ storage._shared_incref()
563
+ elif storage.size() == 0:
564
+ # This is special cased because Empty tensors
565
+ # (with size 0) cannot be mmapped.
566
+ return (rebuild_storage_empty, (type(storage),))
567
+ else:
568
+ fd, size = storage._share_fd_cpu_()
569
+ df = multiprocessing.reduction.DupFd(fd)
570
+ cache_key = fd_id(fd)
571
+ metadata = (df, size)
572
+ rebuild = rebuild_storage_fd # type: ignore[assignment]
573
+
574
+ shared_cache[cache_key] = StorageWeakRef(storage)
575
+ return (rebuild, (type(storage),) + metadata)
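+
+ # Illustrative sketch (not part of the upstream file): reduce_storage() picks the
+ # transport from the configured sharing strategy; it is exercised indirectly
+ # whenever a CPU tensor (and hence its storage) is sent to another process, e.g.:
+ #
+ #     import torch
+ #     import torch.multiprocessing as mp
+ #
+ #     # "file_system" backs shared storages with files in shm instead of holding
+ #     # file descriptors open ("file_descriptor" is the default on Linux).
+ #     mp.set_sharing_strategy("file_system")
+ #
+ #     t = torch.zeros(3)
+ #     t.share_memory_()  # move the storage into shared memory up front
+ #     q = mp.Queue()
+ #     q.put(t)  # its storage travels via reduce_storage()/rebuild_storage_filename()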
576
+
577
+
578
+ def init_reductions():
579
+ ForkingPickler.register(torch.cuda.Event, reduce_event)
580
+
581
+ for t in torch._storage_classes:
582
+ if t.__name__ == "UntypedStorage":
583
+ ForkingPickler.register(t, reduce_storage)
584
+ else:
585
+ ForkingPickler.register(t, reduce_typed_storage_child)
586
+
587
+ ForkingPickler.register(torch.storage.TypedStorage, reduce_typed_storage)
588
+
589
+ for t in torch._tensor_classes:
590
+ ForkingPickler.register(t, reduce_tensor)
591
+
592
+ # TODO: Maybe this should be in tensor_classes? :)
593
+ ForkingPickler.register(torch.Tensor, reduce_tensor)
594
+ ForkingPickler.register(torch.nn.parameter.Parameter, reduce_tensor)
venv/lib/python3.10/site-packages/torch/multiprocessing/spawn.py ADDED
@@ -0,0 +1,281 @@
1
+ import logging
2
+ import multiprocessing
3
+ import multiprocessing.connection
4
+ import os
5
+ import pickle
6
+ import signal
7
+ import sys
8
+ import tempfile
9
+ import time
10
+ import warnings
11
+ from typing import Optional
12
+
13
+ from . import _prctl_pr_set_pdeathsig # type: ignore[attr-defined]
14
+
15
+ log = logging.getLogger(__name__)
16
+
17
+
18
+ class ProcessException(Exception):
19
+ __slots__ = ["error_index", "error_pid"]
20
+
21
+ def __init__(self, msg: str, error_index: int, pid: int):
22
+ super().__init__(msg)
23
+ self.msg = msg
24
+ self.error_index = error_index
25
+ self.pid = pid
26
+
27
+ def __reduce__(self):
28
+ return type(self), (self.msg, self.error_index, self.pid)
29
+
30
+
31
+ class ProcessRaisedException(ProcessException):
32
+ """Exception raised when a process failed due to an exception raised by the code."""
33
+
34
+ def __init__(
35
+ self,
36
+ msg: str,
37
+ error_index: int,
38
+ error_pid: int,
39
+ ):
40
+ super().__init__(msg, error_index, error_pid)
41
+
42
+
43
+ class ProcessExitedException(ProcessException):
44
+ """Exception raised when a process failed due to signal or exited with a specific code."""
45
+
46
+ __slots__ = ["exit_code"]
47
+
48
+ def __init__(
49
+ self,
50
+ msg: str,
51
+ error_index: int,
52
+ error_pid: int,
53
+ exit_code: int,
54
+ signal_name: Optional[str] = None,
55
+ ):
56
+ super().__init__(msg, error_index, error_pid)
57
+ self.exit_code = exit_code
58
+ self.signal_name = signal_name
59
+
60
+ def __reduce__(self):
61
+ return (
62
+ type(self),
63
+ (self.msg, self.error_index, self.pid, self.exit_code, self.signal_name),
64
+ )
65
+
66
+
67
+ def _wrap(fn, i, args, error_file):
68
+ # prctl(2) is a Linux specific system call.
69
+ # On other systems the following function call has no effect.
70
+ # This is set to ensure that non-daemonic child processes can
71
+ # terminate if their parent terminates before they do.
72
+ _prctl_pr_set_pdeathsig(signal.SIGINT)
73
+
74
+ try:
75
+ fn(i, *args)
76
+ except KeyboardInterrupt:
77
+ pass # SIGINT; Killed by parent, do nothing
78
+ except Exception:
79
+ # Propagate exception to parent process, keeping original traceback
80
+ import traceback
81
+
82
+ with open(error_file, "wb") as fh:
83
+ pickle.dump(traceback.format_exc(), fh)
84
+ sys.exit(1)
85
+
86
+
87
+ class ProcessContext:
88
+ def __init__(self, processes, error_files):
89
+ self.error_files = error_files
90
+ self.processes = processes
91
+ self.sentinels = {
92
+ process.sentinel: index for index, process in enumerate(processes)
93
+ }
94
+
95
+ def pids(self):
96
+ return [int(process.pid) for process in self.processes]
97
+
98
+ def join(self, timeout=None):
99
+ r"""Join one or more processes within spawn context.
100
+
101
+ Attempt to join one or more processes in this spawn context.
102
+ If one of them exited with a non-zero exit status, this function
103
+ kills the remaining processes and raises an exception with the cause
104
+ of the first process exiting.
105
+
106
+ Returns ``True`` if all processes have been joined successfully,
107
+ ``False`` if there are more processes that need to be joined.
108
+
109
+ Args:
110
+ timeout (float): Wait this long before giving up on waiting.
111
+ """
112
+ # Ensure this function can be called even when we're done.
113
+ if len(self.sentinels) == 0:
114
+ return True
115
+
116
+ # Wait for any process to fail or all of them to succeed.
117
+ ready = multiprocessing.connection.wait(
118
+ self.sentinels.keys(),
119
+ timeout=timeout,
120
+ )
121
+
122
+ error_index = None
123
+ for sentinel in ready:
124
+ index = self.sentinels.pop(sentinel)
125
+ process = self.processes[index]
126
+ process.join()
127
+ if process.exitcode != 0:
128
+ error_index = index
129
+ break
130
+
131
+ # Return if there was no error.
132
+ if error_index is None:
133
+ # Return whether or not all processes have been joined.
134
+ return len(self.sentinels) == 0
135
+
136
+ # Assume failure. Terminate processes that are still alive.
137
+ # Try SIGTERM then SIGKILL if the process isn't going down.
138
+ # The reason is that Python signal handling is limited
139
+ # to the main thread, and if that thread is stuck in C/C++ land it won't
140
+ # get a chance to handle it. We have seen processes getting stuck not handling
141
+ # SIGTERM for the above reason.
142
+ timeout: int = 30
143
+ for process in self.processes:
144
+ if process.is_alive():
145
+ log.warning("Terminating process %s via signal SIGTERM", process.pid)
146
+ process.terminate()
147
+ end = time.monotonic() + timeout
148
+ for process in self.processes:
149
+ time_to_wait = max(0, end - time.monotonic())
150
+ process.join(time_to_wait)
151
+ for process in self.processes:
152
+ if process.is_alive():
153
+ log.warning(
154
+ "Unable to shutdown process %s via SIGTERM , forcefully exiting via SIGKILL",
155
+ process.pid,
156
+ )
157
+ process.kill()
158
+ process.join()
159
+
160
+ # The file will only be created if the process crashed.
161
+ failed_process = self.processes[error_index]
162
+ if not os.access(self.error_files[error_index], os.R_OK):
163
+ exitcode = self.processes[error_index].exitcode
164
+ if exitcode < 0:
165
+ try:
166
+ name = signal.Signals(-exitcode).name
167
+ except ValueError:
168
+ name = f"<Unknown signal {-exitcode}>"
169
+ raise ProcessExitedException(
170
+ "process %d terminated with signal %s" % (error_index, name),
171
+ error_index=error_index,
172
+ error_pid=failed_process.pid,
173
+ exit_code=exitcode,
174
+ signal_name=name,
175
+ )
176
+ else:
177
+ raise ProcessExitedException(
178
+ "process %d terminated with exit code %d" % (error_index, exitcode),
179
+ error_index=error_index,
180
+ error_pid=failed_process.pid,
181
+ exit_code=exitcode,
182
+ )
183
+
184
+ with open(self.error_files[error_index], "rb") as fh:
185
+ original_trace = pickle.load(fh)
186
+ msg = "\n\n-- Process %d terminated with the following error:\n" % error_index
187
+ msg += original_trace
188
+ raise ProcessRaisedException(msg, error_index, failed_process.pid)
189
+
190
+
191
+ class SpawnContext(ProcessContext):
192
+ def __init__(self, processes, error_files):
193
+ warnings.warn("SpawnContext is renamed to ProcessContext since 1.4 release.")
194
+ super().__init__(processes, error_files)
195
+
196
+
197
+ # Note: [start_processes]
198
+ # mp.start_processes handles both start_method='spawn' and 'fork'. It's supposed to be a
199
+ # more generalized API than mp.spawn. Currently we only document mp.spawn as it's the
200
+ # CUDA-compatible start_method. However, in environments like IPython notebooks, 'fork'
201
+ # works better than 'spawn'. Every helper function we created for mp.spawn is indeed
202
+ # general enough, and backends like XLA can reuse them in Colab notebooks as well.
203
+ # For now we only add this API; we can consider adding it to the documentation as
204
+ # needed in the future.
205
+ def start_processes(
206
+ fn, args=(), nprocs=1, join=True, daemon=False, start_method="spawn"
207
+ ):
208
+ mp = multiprocessing.get_context(start_method)
209
+ error_files = []
210
+ processes = []
211
+ for i in range(nprocs):
212
+ # Each process is assigned a file to write tracebacks to. We
213
+ # use the file being non-empty to indicate an exception
214
+ # occurred (vs an expected shutdown). Note: this previously
215
+ # used a multiprocessing.Queue but that can be prone to
216
+ # deadlocks, so we went with a simpler solution for a one-shot
217
+ # message between processes.
218
+ tf = tempfile.NamedTemporaryFile(
219
+ prefix="pytorch-errorfile-", suffix=".pickle", delete=False
220
+ )
221
+ tf.close()
222
+ os.unlink(tf.name)
223
+ process = mp.Process(
224
+ target=_wrap,
225
+ args=(fn, i, args, tf.name),
226
+ daemon=daemon,
227
+ )
228
+ process.start()
229
+ error_files.append(tf.name)
230
+ processes.append(process)
231
+
232
+ context = ProcessContext(processes, error_files)
233
+ if not join:
234
+ return context
235
+
236
+ # Loop on join until it returns True or raises an exception.
237
+ while not context.join():
238
+ pass
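+
+ # Illustrative sketch (not part of the upstream file): with join=False the caller
+ # gets the ProcessContext back and can poll it instead of blocking. Assuming
+ # `worker` is defined at module top level:
+ #
+ #     def worker(i, msg):
+ #         print(f"rank {i}: {msg}")
+ #
+ #     ctx = start_processes(worker, args=("hello",), nprocs=2, join=False)
+ #     print(ctx.pids())
+ #     while not ctx.join(timeout=1.0):
+ #         pass  # other work can happen between polls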
239
+
240
+
241
+ def spawn(fn, args=(), nprocs=1, join=True, daemon=False, start_method="spawn"):
242
+ r"""Spawns ``nprocs`` processes that run ``fn`` with ``args``.
243
+
244
+ If one of the processes exits with a non-zero exit status, the
245
+ remaining processes are killed and an exception is raised with the
246
+ cause of termination. If an exception was caught in the
247
+ child process, it is forwarded and its traceback is included in
248
+ the exception raised in the parent process.
249
+
250
+ Args:
251
+ fn (function): Function is called as the entrypoint of the
252
+ spawned process. This function must be defined at the top
253
+ level of a module so it can be pickled and spawned. This
254
+ is a requirement imposed by multiprocessing.
255
+
256
+ The function is called as ``fn(i, *args)``, where ``i`` is
257
+ the process index and ``args`` is the passed through tuple
258
+ of arguments.
259
+
260
+ args (tuple): Arguments passed to ``fn``.
261
+ nprocs (int): Number of processes to spawn.
262
+ join (bool): Perform a blocking join on all processes.
263
+ daemon (bool): The spawned processes' daemon flag. If set to True,
264
+ daemonic processes will be created.
265
+ start_method (str): (deprecated) this method will always use ``spawn``
266
+ as the start method. To use a different start method
267
+ use ``start_processes()``.
268
+
269
+ Returns:
270
+ None if ``join`` is ``True``,
271
+ :class:`~ProcessContext` if ``join`` is ``False``
272
+
273
+ """
274
+ if start_method != "spawn":
275
+ msg = (
276
+ "This method only supports start_method=spawn (got: %s).\n"
277
+ "To use a different start_method use:\n\t\t"
278
+ " torch.multiprocessing.start_processes(...)" % start_method
279
+ )
280
+ warnings.warn(msg)
281
+ return start_processes(fn, args, nprocs, join, daemon, start_method="spawn")
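+
+ # Illustrative sketch (not part of the upstream file): typical use of spawn() from
+ # a script. `worker` must be defined at module top level so it can be pickled for
+ # the spawned children:
+ #
+ #     import torch.multiprocessing as mp
+ #
+ #     def worker(i, total):
+ #         print(f"process {i} of {total}")
+ #
+ #     if __name__ == "__main__":
+ #         mp.spawn(worker, args=(4,), nprocs=4)  # blocks until all four workers exit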
venv/lib/python3.10/site-packages/torch/optim/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.08 kB). View file
 
venv/lib/python3.10/site-packages/torch/optim/__pycache__/_functional.cpython-310.pyc ADDED
Binary file (2.09 kB). View file
 
venv/lib/python3.10/site-packages/torch/optim/__pycache__/adadelta.cpython-310.pyc ADDED
Binary file (8.53 kB). View file
 
venv/lib/python3.10/site-packages/torch/optim/__pycache__/adagrad.cpython-310.pyc ADDED
Binary file (10.1 kB). View file
 
venv/lib/python3.10/site-packages/torch/optim/__pycache__/adam.cpython-310.pyc ADDED
Binary file (16.5 kB). View file
 
venv/lib/python3.10/site-packages/torch/optim/__pycache__/adamax.cpython-310.pyc ADDED
Binary file (10.7 kB). View file
 
venv/lib/python3.10/site-packages/torch/optim/__pycache__/adamw.cpython-310.pyc ADDED
Binary file (16.5 kB). View file
 
venv/lib/python3.10/site-packages/torch/optim/__pycache__/asgd.cpython-310.pyc ADDED
Binary file (8.55 kB). View file
 
venv/lib/python3.10/site-packages/torch/optim/__pycache__/lr_scheduler.cpython-310.pyc ADDED
Binary file (67.9 kB). View file
 
venv/lib/python3.10/site-packages/torch/optim/__pycache__/nadam.cpython-310.pyc ADDED
Binary file (13.8 kB). View file
 
venv/lib/python3.10/site-packages/torch/optim/__pycache__/optimizer.cpython-310.pyc ADDED
Binary file (34.5 kB). View file
 
venv/lib/python3.10/site-packages/torch/optim/__pycache__/radam.cpython-310.pyc ADDED
Binary file (14.9 kB). View file
 
venv/lib/python3.10/site-packages/torch/optim/__pycache__/rmsprop.cpython-310.pyc ADDED
Binary file (10.4 kB). View file
 
venv/lib/python3.10/site-packages/torch/optim/__pycache__/rprop.cpython-310.pyc ADDED
Binary file (9.06 kB). View file
 
venv/lib/python3.10/site-packages/torch/optim/__pycache__/sgd.cpython-310.pyc ADDED
Binary file (12.1 kB). View file