applied-ai-018 committed on
Commit
2b04270
·
verified ·
1 Parent(s): ff8ddfa

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. env-llmeval/lib/python3.10/site-packages/torch/linalg/__init__.py +0 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/linalg/__pycache__/__init__.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/nested/__init__.py +256 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/testing/__pycache__/__init__.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/testing/__pycache__/_comparison.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/testing/__pycache__/_creation.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/__init__.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/autocast_test_lists.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/autograd_function_db.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/check_kernel_launches.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_cuda.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_device_type.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_dist_composable.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_distributed.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_dtype.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_fsdp.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_jit.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_methods_invocations.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_modules.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_nn.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_pruning.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_quantization.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_quantized.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_subclass.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_utils.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/composite_compliance.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/control_flow_opinfo_db.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/custom_op_db.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/dist_utils.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/hypothesis_utils.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/inductor_utils.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/jit_metaprogramming_utils.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/jit_utils.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/logging_tensor.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/logging_utils.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/quantization_torch_package_models.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/triton_utils.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/two_tensor.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__pycache__/parameter_server_test.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__pycache__/reinforcement_learning_rpc_test.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/reinforcement_learning_rpc_test.py +259 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__init__.py +0 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/__init__.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/dist_autograd_test.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/rpc_test.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/rpc_test_faulty.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/dist_autograd_test.py +114 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/rpc_test.py +1383 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/rpc_test_faulty.py +216 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__init__.py +2 -0
env-llmeval/lib/python3.10/site-packages/torch/linalg/__init__.py ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/torch/linalg/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (113 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/nested/__init__.py ADDED
@@ -0,0 +1,256 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Optional, Union
2
+
3
+ import torch
4
+ from torch import SymInt, Tensor
5
+ from torch._C import _add_docstr, _nested # type: ignore[attr-defined]
6
+
7
+ from torch.types import _device as Device, _dtype as DType
8
+
9
+ __all__ = [
10
+ "to_padded_tensor",
11
+ "as_nested_tensor",
12
+ "nested_tensor",
13
+ "narrow",
14
+ ]
15
+
16
+ # Nested Tensor constructor functions
17
+
18
+
19
def as_nested_tensor(
    tensor_list: List[Tensor],
    dtype: Optional[DType] = None,
    device: Optional[Device] = None,
    layout=None
) -> Tensor:
    r"""
    Constructs a nested tensor preserving autograd history from :attr:`tensor_list` a list of tensors.

    .. note::
        Tensors within the list are always copied by this function due to current nested tensor semantics.

    Args:
        tensor_list (List[Tensor]): a list (or tuple) of tensors with the same ndim

    Keyword arguments:
        dtype (:class:`torch.dtype`, optional): the desired type of returned nested tensor.
            Default: if None, same :class:`torch.dtype` as leftmost tensor in the list.
        device (:class:`torch.device`, optional): the desired device of returned nested tensor.
            Default: if None, same :class:`torch.device` as leftmost tensor in the list
        layout (:class:`torch.layout`, optional): the desired layout of returned nested tensor.
            Only strided and jagged layouts are supported. Default: if None, the strided layout.

    Example::

        >>> a = torch.arange(3, dtype=torch.float, requires_grad=True)
        >>> b = torch.arange(5, dtype=torch.float, requires_grad=True)
        >>> nt = torch.nested.as_nested_tensor([a, b])
        >>> nt.is_leaf
        False
        >>> fake_grad = torch.nested.nested_tensor([torch.ones_like(a), torch.zeros_like(b)])
        >>> nt.backward(fake_grad)
        >>> a.grad
        tensor([1., 1., 1.])
        >>> b.grad
        tensor([0., 0., 0., 0., 0.])
    """
    # Accept tuples as well as lists: callers such as narrow() pass the result
    # of torch.unbind(), which is a tuple, and would otherwise hit a spurious
    # TypeError here. This is a backward-compatible generalization.
    if not isinstance(tensor_list, (list, tuple)) or any(
        not isinstance(t, Tensor) for t in tensor_list
    ):
        raise TypeError(
            "as_nested_tensor(): Expected first argument to be a list of tensors "
        )
    # Normalize to a list for the downstream constructors.
    tensor_list = list(tensor_list)

    if layout is None:
        layout = torch.strided
    if layout == torch.strided:
        return torch._nested_tensor_from_tensor_list(tensor_list, dtype, None, device, None)
    elif layout == torch.jagged:
        from torch.nested._internal.nested_tensor import jagged_from_list

        nt, _ = jagged_from_list(tensor_list, offsets=None, device=device, dtype=dtype)
        return nt
    else:
        raise RuntimeError(f"Specified layout is unsupported for nested tensors: {layout}")
74
+
75
+
76
# Note: This not only adds doc strings for the nested ops, but
# also connects the torch.nested Python namespace to the torch._C._nested builtins.
# `to_padded_tensor` is implemented in C++ (`_nested.nested_to_padded_tensor`);
# `_add_docstr` attaches the Python-visible documentation to that builtin.

to_padded_tensor = _add_docstr(
    _nested.nested_to_padded_tensor,
    r"""
to_padded_tensor(input, padding, output_size=None, out=None) -> Tensor

Returns a new (non-nested) Tensor by padding the :attr:`input` nested tensor.
The leading entries will be filled with the nested data,
while the trailing entries will be padded.

.. warning::

    :func:`to_padded_tensor` always copies the underlying data,
    since the nested and the non-nested tensors differ in memory layout.

Args:
    padding (float): The padding value for the trailing entries.

Keyword args:
    output_size (Tuple[int]): The size of the output tensor.
        If given, it must be large enough to contain all nested data;
        else, will infer by taking the max size of each nested sub-tensor along each dimension.
    out (Tensor, optional): the output tensor.

Example::

    >>> nt = torch.nested.nested_tensor([torch.randn((2, 5)), torch.randn((3, 4))])
    nested_tensor([
      tensor([[ 1.6862, -1.1282,  1.1031,  0.0464, -1.3276],
              [-1.9967, -1.0054,  1.8972,  0.9174, -1.4995]]),
      tensor([[-1.8546, -0.7194, -0.2918, -0.1846],
              [ 0.2773,  0.8793, -0.5183, -0.6447],
              [ 1.8009,  1.8468, -0.9832, -1.5272]])
    ])
    >>> pt_infer = torch.nested.to_padded_tensor(nt, 0.0)
    tensor([[[ 1.6862, -1.1282,  1.1031,  0.0464, -1.3276],
             [-1.9967, -1.0054,  1.8972,  0.9174, -1.4995],
             [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000]],
            [[-1.8546, -0.7194, -0.2918, -0.1846,  0.0000],
             [ 0.2773,  0.8793, -0.5183, -0.6447,  0.0000],
             [ 1.8009,  1.8468, -0.9832, -1.5272,  0.0000]]])
    >>> pt_large = torch.nested.to_padded_tensor(nt, 1.0, (2, 4, 6))
    tensor([[[ 1.6862, -1.1282,  1.1031,  0.0464, -1.3276,  1.0000],
             [-1.9967, -1.0054,  1.8972,  0.9174, -1.4995,  1.0000],
             [ 1.0000,  1.0000,  1.0000,  1.0000,  1.0000,  1.0000],
             [ 1.0000,  1.0000,  1.0000,  1.0000,  1.0000,  1.0000]],
            [[-1.8546, -0.7194, -0.2918, -0.1846,  1.0000,  1.0000],
             [ 0.2773,  0.8793, -0.5183, -0.6447,  1.0000,  1.0000],
             [ 1.8009,  1.8468, -0.9832, -1.5272,  1.0000,  1.0000],
             [ 1.0000,  1.0000,  1.0000,  1.0000,  1.0000,  1.0000]]])
    >>> pt_small = torch.nested.to_padded_tensor(nt, 2.0, (2, 2, 2))
    RuntimeError: Value in output_size is less than NestedTensor padded size. Truncation is not supported.

""",
)
133
+
134
+ def nested_tensor(tensor_list, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor:
135
+ r"""
136
+ Constructs a nested tensor with no autograd history (also known as a “leaf tensor”, see
137
+ :ref:`Autograd mechanics <autograd-mechanics>`) from :attr:`tensor_list` a list of tensors.
138
+
139
+ Args:
140
+ tensor_list (List[array_like]): a list of tensors, or anything that can be passed to torch.tensor,
141
+ where each element of the list has the same dimensionality.
142
+
143
+ Keyword arguments:
144
+ dtype (:class:`torch.dtype`, optional): the desired type of returned nested tensor.
145
+ Default: if None, same :class:`torch.dtype` as leftmost tensor in the list.
146
+ layout (:class:`torch.layout`, optional): the desired layout of returned nested tensor.
147
+ Only strided and jagged layouts are supported. Default: if None, the strided layout.
148
+ device (:class:`torch.device`, optional): the desired device of returned nested tensor.
149
+ Default: if None, same :class:`torch.device` as leftmost tensor in the list
150
+ requires_grad (bool, optional): If autograd should record operations on the
151
+ returned nested tensor. Default: ``False``.
152
+ pin_memory (bool, optional): If set, returned nested tensor would be allocated in
153
+ the pinned memory. Works only for CPU tensors. Default: ``False``.
154
+
155
+ Example::
156
+
157
+ >>> a = torch.arange(3, dtype=torch.float, requires_grad=True)
158
+ >>> b = torch.arange(5, dtype=torch.float, requires_grad=True)
159
+ >>> nt = torch.nested.nested_tensor([a, b], requires_grad=True)
160
+ >>> nt.is_leaf
161
+ True
162
+ """
163
+ if layout is None:
164
+ layout = torch.strided
165
+ if layout == torch.strided:
166
+ return _nested.nested_tensor(
167
+ tensor_list,
168
+ dtype=dtype,
169
+ device=device,
170
+ requires_grad=requires_grad,
171
+ pin_memory=pin_memory)
172
+ elif layout == torch.jagged:
173
+ # Need to:
174
+ # * Detach tensors to discard autograd history
175
+ # * Wrap lists of scalars as tensors
176
+ list_of_tensors = [t.detach() if isinstance(t, Tensor) else torch.as_tensor(t)
177
+ for t in tensor_list]
178
+
179
+ from torch.nested._internal.nested_tensor import jagged_from_list
180
+
181
+ with torch.no_grad():
182
+ nt, _ = jagged_from_list(list_of_tensors, offsets=None, device=device, dtype=dtype)
183
+
184
+ nt.requires_grad_(requires_grad)
185
+ if pin_memory:
186
+ nt = nt.pin_memory() # type: ignore[assignment]
187
+
188
+ return nt
189
+ else:
190
+ raise RuntimeError(f"Specified layout is unsupported for nested tensors: {layout}")
191
+
192
+
193
+ def narrow(tensor: Tensor, dim: int, start: Union[int, Tensor], length: Union[int, Tensor], layout=torch.strided) -> Tensor:
194
+ r"""
195
+ Constructs a nested tensor (which might be a view) from :attr:`tensor`, a strided tensor. This follows
196
+ similar semantics to torch.Tensor.narrow, where in the :attr:`dim`-th dimension the new nested tensor
197
+ shows only the elements in the interval `[start, start+length)`. As nested representations
198
+ allow for a different `start` and `length` at each 'row' of that dimension, :attr:`start` and :attr:`length`
199
+ can also be tensors of shape `tensor.shape[0]`.
200
+
201
+ There's some differences depending on the layout you use for the nested tensor. If using strided layout,
202
+ torch.narrow will do a copy of the narrowed data into a contiguous NT with strided layout, while
203
+ jagged layout narrow() will create a non-contiguous view of your original strided tensor. This particular
204
+ representation is really useful for representing kv-caches in Transformer models, as specialized
205
+ SDPA kernels can deal with format easily, resulting in performance improvements.
206
+
207
+
208
+ Args:
209
+ tensor (:class:`torch.Tensor`): a strided tensor, which will be used as the underlying data
210
+ for the nested tensor if using the jagged layout or will be copied for the strided layout.
211
+ dim (int): the dimension where narrow will be applied. Only `dim=1` is supported for the
212
+ jagged layout, while strided supports all dim
213
+ start (Union[int, :class:`torch.Tensor`]): starting element for the narrow operation
214
+ length (Union[int, :class:`torch.Tensor`]): number of elements taken during the narrow op
215
+
216
+ Keyword arguments:
217
+ layout (:class:`torch.layout`, optional): the desired layout of returned nested tensor.
218
+ Only strided and jagged layouts are supported. Default: if None, the strided layout.
219
+
220
+ Example::
221
+
222
+ >>> starts = torch.tensor([0, 1, 2, 3, 4], dtype=torch.int64)
223
+ >>> lengths = torch.tensor([3, 2, 2, 1, 5], dtype=torch.int64)
224
+ >>> narrow_base = torch.randn(5, 10, 20)
225
+ >>> nt_narrowed = torch.nested.narrow(narrow_base, 1, starts, lengths, layout=torch.jagged)
226
+ >>> nt_narrowed.is_contiguous()
227
+ False
228
+ """
229
+ if not isinstance(start, (int, SymInt, Tensor)):
230
+ raise RuntimeError("start must be an integer or a tensor")
231
+
232
+ if not isinstance(length, (int, SymInt, Tensor)):
233
+ raise RuntimeError("length must be an integer or a tensor")
234
+
235
+ if layout == torch.strided:
236
+ if isinstance(start, Tensor) or isinstance(length, Tensor):
237
+ raise RuntimeError("start and length must be integers for the strided layout NT impl")
238
+ # TODO: switch to as_nested_tensor(tensor) when it is available
239
+ nt = as_nested_tensor(torch.unbind(tensor), layout=torch.strided).narrow(dim, start, length)
240
+ elif layout == torch.jagged:
241
+ if dim != 1:
242
+ raise RuntimeError("jagged layout only supports dim=1")
243
+
244
+ from torch.nested._internal.nested_tensor import jagged_from_tensor_and_lengths
245
+
246
+ if isinstance(start, (int, SymInt)):
247
+ start = torch.tensor([start], device=tensor.device, dtype=torch.int64)
248
+
249
+ if isinstance(length, (int, SymInt)):
250
+ length = torch.tensor([length], device=tensor.device, dtype=torch.int64)
251
+
252
+ nt, _, _ = jagged_from_tensor_and_lengths(tensor, start, length)
253
+ else:
254
+ raise RuntimeError(f"Specified layout is unsupported for nested narrow: {layout}")
255
+
256
+ return nt
env-llmeval/lib/python3.10/site-packages/torch/testing/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (344 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/__pycache__/_comparison.cpython-310.pyc ADDED
Binary file (52.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/__pycache__/_creation.cpython-310.pyc ADDED
Binary file (9.12 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (188 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/autocast_test_lists.cpython-310.pyc ADDED
Binary file (10.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/autograd_function_db.cpython-310.pyc ADDED
Binary file (16.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/check_kernel_launches.cpython-310.pyc ADDED
Binary file (3.86 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_cuda.cpython-310.pyc ADDED
Binary file (8.72 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_device_type.cpython-310.pyc ADDED
Binary file (37.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_dist_composable.cpython-310.pyc ADDED
Binary file (4.05 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_distributed.cpython-310.pyc ADDED
Binary file (38 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_dtype.cpython-310.pyc ADDED
Binary file (4.64 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_fsdp.cpython-310.pyc ADDED
Binary file (35.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_jit.cpython-310.pyc ADDED
Binary file (11.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_methods_invocations.cpython-310.pyc ADDED
Binary file (541 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_modules.cpython-310.pyc ADDED
Binary file (74.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_nn.cpython-310.pyc ADDED
Binary file (154 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_pruning.cpython-310.pyc ADDED
Binary file (13.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_quantization.cpython-310.pyc ADDED
Binary file (95.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_quantized.cpython-310.pyc ADDED
Binary file (7.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_subclass.cpython-310.pyc ADDED
Binary file (7.62 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/common_utils.cpython-310.pyc ADDED
Binary file (151 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/composite_compliance.cpython-310.pyc ADDED
Binary file (17.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/control_flow_opinfo_db.cpython-310.pyc ADDED
Binary file (2.57 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/custom_op_db.cpython-310.pyc ADDED
Binary file (15.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/dist_utils.cpython-310.pyc ADDED
Binary file (6.47 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/hypothesis_utils.cpython-310.pyc ADDED
Binary file (6.87 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/inductor_utils.cpython-310.pyc ADDED
Binary file (2.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/jit_metaprogramming_utils.cpython-310.pyc ADDED
Binary file (24.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/jit_utils.cpython-310.pyc ADDED
Binary file (28.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/logging_tensor.cpython-310.pyc ADDED
Binary file (6.92 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/logging_utils.cpython-310.pyc ADDED
Binary file (5.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/quantization_torch_package_models.cpython-310.pyc ADDED
Binary file (1.63 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/triton_utils.cpython-310.pyc ADDED
Binary file (3.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/__pycache__/two_tensor.cpython-310.pyc ADDED
Binary file (3.42 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__pycache__/parameter_server_test.cpython-310.pyc ADDED
Binary file (4.97 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/__pycache__/reinforcement_learning_rpc_test.cpython-310.pyc ADDED
Binary file (8.95 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/examples/reinforcement_learning_rpc_test.py ADDED
@@ -0,0 +1,259 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # If you need to modify this file to make this test pass, please also apply same edits accordingly to
2
+ # https://github.com/pytorch/examples/blob/master/distributed/rpc/rl/main.py
3
+ # and https://pytorch.org/tutorials/intermediate/rpc_tutorial.html
4
+
5
+ import numpy as np
6
+ from itertools import count
7
+
8
+ import torch
9
+ import torch.distributed.rpc as rpc
10
+ import torch.nn as nn
11
+ import torch.nn.functional as F
12
+ import torch.optim as optim
13
+ from torch.distributed.rpc import RRef, rpc_sync, rpc_async, remote
14
+ from torch.distributions import Categorical
15
+
16
+ from torch.testing._internal.dist_utils import dist_init, worker_name
17
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import RpcAgentTestFixture
18
+
19
+ TOTAL_EPISODE_STEP = 5000
20
+ GAMMA = 0.1
21
+ SEED = 543
22
+
23
+ def _call_method(method, rref, *args, **kwargs):
24
+ r"""
25
+ a helper function to call a method on the given RRef
26
+ """
27
+ return method(rref.local_value(), *args, **kwargs)
28
+
29
+
30
def _remote_method(method, rref, *args, **kwargs):
    r"""
    Execute ``method`` on the worker that owns ``rref`` via a synchronous RPC
    and fetch the result back to the caller.
    """
    rpc_args = [method, rref, *args]
    return rpc_sync(rref.owner(), _call_method, args=rpc_args, kwargs=kwargs)
37
+
38
+
39
class Policy(nn.Module):
    r"""
    Borrowing the ``Policy`` class from the Reinforcement Learning example.
    Copying the code to make these two examples independent.
    See https://github.com/pytorch/examples/tree/master/reinforcement_learning

    Maps a 4-dim state to a probability distribution over 2 actions, and keeps
    per-episode bookkeeping lists for log-probs and rewards.
    """
    def __init__(self):
        super().__init__()
        # Two-layer MLP with dropout between the layers.
        self.affine1 = nn.Linear(4, 128)
        self.dropout = nn.Dropout(p=0.6)
        self.affine2 = nn.Linear(128, 2)

        # Populated during an episode; cleared when the episode finishes.
        self.saved_log_probs = []
        self.rewards = []

    def forward(self, x):
        hidden = F.relu(self.dropout(self.affine1(x)))
        return F.softmax(self.affine2(hidden), dim=1)
60
+
61
+
62
class DummyEnv:
    r"""
    A dummy environment that implements the required subset of the OpenAI gym
    interface. It exists only to avoid a dependency on gym for running the
    tests in this file. It is designed to run for a set max number of iterations,
    returning random states and rewards at each step.
    """
    def __init__(self, state_dim=4, num_iters=10, reward_threshold=475.0):
        self.state_dim = state_dim
        self.num_iters = num_iters
        self.iter = 0
        self.reward_threshold = reward_threshold

    def seed(self, manual_seed):
        # Delegate to torch's global RNG, which drives randn/rand below.
        torch.manual_seed(manual_seed)

    def reset(self):
        # Rewind the step counter and emit a fresh random starting state.
        self.iter = 0
        return torch.randn(self.state_dim)

    def step(self, action):
        self.iter += 1
        observation = torch.randn(self.state_dim)
        scaled_reward = torch.rand(1).item() * self.reward_threshold
        finished = self.iter >= self.num_iters
        return observation, scaled_reward, finished, {}
89
+
90
+
91
class Observer:
    r"""
    An observer has exclusive access to its own environment. Each observer
    captures the state from its environment, and sends the state to the agent to
    select an action. Then, the observer applies the action to its environment
    and reports the reward to the agent.
    """
    def __init__(self):
        # The RPC worker id doubles as this observer's key in the agent's
        # per-observer bookkeeping dicts. Requires rpc.init_rpc() to have been
        # called on this worker before construction.
        self.id = rpc.get_worker_info().id
        self.env = DummyEnv()
        self.env.seed(SEED)

    def run_episode(self, agent_rref, n_steps):
        r"""
        Run one episode of n_steps.

        Arguments:
            agent_rref (RRef): an RRef referencing the agent object.
            n_steps (int): number of steps in this episode
        """
        state, ep_reward = self.env.reset(), 0
        for step in range(n_steps):
            # send the state to the agent to get an action
            action = _remote_method(Agent.select_action, agent_rref, self.id, state)

            # apply the action to the environment, and get the reward
            state, reward, done, _ = self.env.step(action)

            # report the reward to the agent for training purpose
            _remote_method(Agent.report_reward, agent_rref, self.id, reward)

            # the dummy env signals termination after its fixed iteration budget
            if done:
                break
123
+
124
+
125
class Agent:
    r"""
    The agent owns the policy network and the optimizer. It hands out actions
    to observers on request, collects their rewards, and performs one policy
    update per episode (REINFORCE-style). Must be constructed on a worker
    where rpc.init_rpc() has already been called.
    """
    def __init__(self, world_size):
        # One remote Observer RRef per non-agent rank (ranks 1..world_size-1).
        self.ob_rrefs = []
        # RRef to self, so observers can call back into this agent.
        self.agent_rref = RRef(self)
        # Per-observer reward / log-prob buffers, keyed by observer worker id.
        self.rewards = {}
        self.saved_log_probs = {}
        self.policy = Policy()
        self.optimizer = optim.Adam(self.policy.parameters(), lr=1e-2)
        # Small epsilon to avoid division by zero when normalizing returns.
        self.eps = np.finfo(np.float32).eps.item()
        self.running_reward = 0
        # Borrow the solve threshold from the environment definition.
        self.reward_threshold = DummyEnv().reward_threshold
        for ob_rank in range(1, world_size):
            ob_info = rpc.get_worker_info(worker_name(ob_rank))
            self.ob_rrefs.append(remote(ob_info, Observer))
            self.rewards[ob_info.id] = []
            self.saved_log_probs[ob_info.id] = []

    def select_action(self, ob_id, state):
        r"""
        This function is mostly borrowed from the Reinforcement Learning example.
        See https://github.com/pytorch/examples/tree/master/reinforcement_learning
        The main difference is that instead of keeping all probs in one list,
        the agent keeps probs in a dictionary, one key per observer.

        NB: no need to enforce thread-safety here as GIL will serialize
        executions.
        """
        probs = self.policy(state.unsqueeze(0))
        m = Categorical(probs)
        action = m.sample()
        self.saved_log_probs[ob_id].append(m.log_prob(action))
        return action.item()

    def report_reward(self, ob_id, reward):
        r"""
        Observers call this function to report rewards.
        """
        self.rewards[ob_id].append(reward)

    def run_episode(self, n_steps=0):
        r"""
        Run one episode. The agent will tell each observer to run n_steps.
        """
        futs = []
        for ob_rref in self.ob_rrefs:
            # make async RPC to kick off an episode on all observers
            futs.append(
                rpc_async(
                    ob_rref.owner(),
                    _call_method,
                    args=(Observer.run_episode, ob_rref, self.agent_rref, n_steps)
                )
            )

        # wait until all observers have finished this episode
        for fut in futs:
            fut.wait()

    def finish_episode(self):
        r"""
        This function is mostly borrowed from the Reinforcement Learning example.
        See https://github.com/pytorch/examples/tree/master/reinforcement_learning
        The main difference is that it joins all probs and rewards from
        different observers into one list, and uses the minimum observer rewards
        as the reward of the current episode.
        """

        # joins probs and rewards from different observers into lists
        R, probs, rewards = 0, [], []
        for ob_id in self.rewards:
            probs.extend(self.saved_log_probs[ob_id])
            rewards.extend(self.rewards[ob_id])

        # use the minimum observer reward to calculate the running reward
        min_reward = min([sum(self.rewards[ob_id]) for ob_id in self.rewards])
        # exponential moving average with a 0.05 smoothing factor
        self.running_reward = 0.05 * min_reward + (1 - 0.05) * self.running_reward

        # clear saved probs and rewards
        for ob_id in self.rewards:
            self.rewards[ob_id] = []
            self.saved_log_probs[ob_id] = []

        # compute discounted returns (walking rewards back-to-front)
        policy_loss, returns = [], []
        for r in rewards[::-1]:
            R = r + GAMMA * R
            returns.insert(0, R)
        returns = torch.tensor(returns)
        # normalize returns; eps guards against a zero std
        returns = (returns - returns.mean()) / (returns.std() + self.eps)
        for log_prob, R in zip(probs, returns):
            policy_loss.append(-log_prob * R)
        self.optimizer.zero_grad()
        policy_loss = torch.cat(policy_loss).sum()
        policy_loss.backward()
        self.optimizer.step()
        return min_reward
220
+
221
+
222
def run_agent(agent, n_steps):
    r"""
    Keep launching episodes of ``n_steps`` until the agent's running reward
    clears the environment's solve threshold.
    """
    while True:
        agent.run_episode(n_steps=n_steps)
        agent.finish_episode()

        if agent.running_reward > agent.reward_threshold:
            print(f"Solved! Running reward is now {agent.running_reward}!")
            break
230
+
231
+
232
class ReinforcementLearningRpcTest(RpcAgentTestFixture):
    r"""
    End-to-end smoke test of the RPC-based RL example: rank 0 plays the agent
    and drives training; every other rank is a passive observer. The test only
    verifies the RPC API flow, not learning quality.
    """
    @dist_init(setup_rpc=False)
    def test_rl_rpc(self):
        if self.rank == 0:
            # Rank 0 is the agent.
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rank=self.rank,
                world_size=self.world_size,
                rpc_backend_options=self.rpc_backend_options,
            )
            agent = Agent(self.world_size)
            # Spread TOTAL_EPISODE_STEP across the (world_size - 1) observers.
            run_agent(agent, n_steps=int(TOTAL_EPISODE_STEP / (self.world_size - 1)))

            # Ensure training was run. We don't really care about whether the task was learned,
            # since the purpose of the test is to check the API calls.
            self.assertGreater(agent.running_reward, 0.0)
        else:
            # Other ranks are observers that passively wait for instructions from the agent.
            rpc.init_rpc(
                name=worker_name(self.rank),
                backend=self.rpc_backend,
                rank=self.rank,
                world_size=self.world_size,
                rpc_backend_options=self.rpc_backend_options,
            )
        # Blocks until all workers reach shutdown, tearing down the RPC group.
        rpc.shutdown()
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (208 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/dist_autograd_test.cpython-310.pyc ADDED
Binary file (4.56 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/rpc_test.cpython-310.pyc ADDED
Binary file (41.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/__pycache__/rpc_test_faulty.cpython-310.pyc ADDED
Binary file (6.03 kB). View file
 
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/dist_autograd_test.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict, Tuple
2
+
3
+ import torch
4
+ import torch.distributed.autograd as dist_autograd
5
+ import torch.distributed.rpc as rpc
6
+ from torch import Tensor
7
+ from torch.distributed.rpc import rpc_async
8
+ from torch.testing import FileCheck
9
+ from torch.testing._internal.dist_utils import dist_init, worker_name
10
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
11
+ RpcAgentTestFixture,
12
+ )
13
+
14
+
15
@torch.jit.script
def local_add(t1, t2):
    """Element-wise sum of two tensors, computed on the calling worker."""
    summed = torch.add(t1, t2)
    return summed
18
+
19
+
20
@torch.jit.script
def remote_add(t1, t2, dst: str):  # noqa: E999
    """Add two tensors on worker `dst` via RPC and block on the result."""
    pending = rpc_async(dst, local_add, (t1, t2))
    return pending.wait()
23
+
24
+
25
@torch.jit.script
def fork_add(t1, t2, dst: str):
    """Launch remote_add on a JIT fork thread and wait for its result."""
    pending = torch.jit._fork(remote_add, t1, t2, dst)
    result = torch.jit._wait(pending)
    return result
29
+
30
+
31
class JitDistAutogradTest(RpcAgentTestFixture):
    """Tests for torch.distributed.autograd used from inside TorchScript."""

    @dist_init
    def test_get_gradients(self):
        """dist_autograd.get_gradients must be scriptable and return the
        tensor -> grad map accumulated inside the context."""
        dst_rank = self.rank

        @torch.jit.script
        def dist_get_gradients(context_id: int) -> (Dict[Tensor, Tensor]):
            return dist_autograd.get_gradients(context_id)

        # Confirm the op actually made it into the compiled graph.
        FileCheck().check("get_gradients").run(str(dist_get_gradients.graph))
        with dist_autograd.context() as context_id:
            t1 = torch.rand((3, 3), requires_grad=True)
            t2 = torch.rand((3, 3), requires_grad=True)
            t3 = torch.add(t1, t2)

            dist_autograd.backward(context_id, [t3.sum()])
            grads = dist_get_gradients(context_id)

        # d(sum(t1 + t2))/dt1 == d(...)/dt2 == ones(3, 3)
        self.assertEqual(2, len(grads))
        self.assertIn(t1, grads)
        self.assertIn(t2, grads)
        self.assertEqual(torch.ones(3, 3), grads[t1])
        self.assertEqual(torch.ones(3, 3), grads[t2])

    @dist_init
    def test_dist_backward(self):
        """dist_autograd.backward must be callable from a scripted function."""
        if self.rank != 0:
            return

        @torch.jit.script
        def dist_backward_script(context_id: int, loss: torch.Tensor):
            dist_autograd.backward(context_id, [loss])

        FileCheck().check("dist_backward").run(str(dist_backward_script.graph))
        with dist_autograd.context() as context_id:
            t1 = torch.rand(3, 3, requires_grad=True)
            t2 = torch.rand(3, 3, requires_grad=True)
            dst_worker_name = worker_name((self.rank + 1) % self.world_size)
            loss = rpc.rpc_sync(dst_worker_name, torch.add, args=(t1, t2)).sum()
            dist_backward_script(context_id, loss)

    @dist_init
    def test_jit_fork_within_context(self):
        """A jit fork issued inside a dist_autograd context must still record
        send/recv edges so backward reaches both leaf tensors."""
        with dist_autograd.context() as context_id:
            t1 = torch.rand((3, 3), requires_grad=True)
            t2 = torch.rand((3, 3), requires_grad=True)
            dst_worker_name = worker_name((self.rank + 1) % self.world_size)
            res = fork_add(t1, t2, dst_worker_name)
            loss = res.sum()
            dist_autograd.backward(context_id, [loss])

            grads = dist_autograd.get_gradients(context_id)
            self.assertEqual(2, len(grads))
            self.assertIn(t1, grads)
            self.assertIn(t2, grads)

    @dist_init
    def test_restore_context_after_swtich_to_jit_thread(self):
        # NOTE(review): "swtich" is a typo for "switch", but the name is the
        # test-discovery interface, so it is kept as-is.
        if self.rank != 0:
            return

        @torch.jit.script
        def forward_script(
            context_id: int, dst_worker_name: str, t1: Tensor, t2: Tensor
        ) -> Tuple[Tensor, Tensor]:
            res1_fut = rpc.rpc_async(dst_worker_name, local_add, (t1, t1))
            res1 = res1_fut.wait()  # After this, the script runs in a new JIT thread.
            loss1 = res1.sum()

            # SendRpcBackward is not attached, since DistAutogradContext is lost here.
            res2_fut = rpc.rpc_async(dst_worker_name, local_add, (t2, t2))
            res2 = res2_fut.wait()
            loss2 = res2.sum()

            return loss1, loss2

        with dist_autograd.context() as context_id:
            t1 = torch.ones((2, 3), requires_grad=True)
            t2 = torch.ones((2, 3), requires_grad=True)
            dst_worker_name = worker_name((self.rank + 1) % self.world_size)
            loss0, loss1 = forward_script(context_id, dst_worker_name, t1, t2)
            dist_autograd.backward(context_id, [loss0, loss1])
            # Both halves of the script must have contributed equal gradients,
            # proving the autograd context survived the JIT thread switch.
            grad0, grad1 = dist_autograd.get_gradients(context_id)
            self.assertEqual(grad0, grad1)
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/rpc_test.py ADDED
@@ -0,0 +1,1383 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import time
2
+ import io
3
+ from typing import Dict, List, Tuple, Any
4
+
5
+ import torch
6
+ import torch.distributed as dist
7
+ import torch.distributed.rpc as rpc
8
+ from torch import Tensor
9
+ from torch.autograd.profiler import record_function
10
+ from torch.distributed.rpc import RRef
11
+ from torch.distributed.rpc.internal import RPCExecMode, _build_rpc_profiling_key
12
+ from torch.futures import Future
13
+ from torch.testing._internal.common_utils import TemporaryFileName
14
+ from torch.testing._internal.dist_utils import (
15
+ dist_init,
16
+ get_function_event,
17
+ initialize_pg,
18
+ worker_name,
19
+ )
20
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
21
+ RpcAgentTestFixture,
22
+ )
23
+
24
+ from torch.autograd.profiler_legacy import profile as _profile
25
+
26
def rref_isinstance(rref, cls_to_check):
    """Return True iff the value held locally by `rref` is an instance of `cls_to_check`."""
    held = rref.local_value()
    return isinstance(held, cls_to_check)
28
+
29
def sleep(t):
    """Block the calling worker for `t` seconds (remote-callable test helper)."""
    seconds = t
    time.sleep(seconds)
31
+
32
+
33
def rpc_return_rref(dst):
    """Create, on worker `dst`, an RRef holding torch.ones(2, 2) + 1."""
    remote_args = (torch.ones(2, 2), 1)
    return rpc.remote(dst, torch.add, args=remote_args)
35
+
36
+
37
@torch.jit.script
def rref_local_value(rref: RRef[Tensor]) -> Tensor:
    """Return the tensor held by `rref`; only valid on the owner worker."""
    return rref.local_value()
40
+
41
+
42
@torch.jit.script
def list_create() -> List[int]:
    """Return a freshly-built [1, 2, 3] list."""
    values: List[int] = [1, 2, 3]
    return values
46
+
47
+
48
@torch.jit.script
def rref_list_mutate(rref: RRef[List[int]]) -> None:
    """Append 4, 5, 6 to the list held by `rref`, exercising all three access
    paths: local_value(), to_here(), and to_here() with an explicit timeout."""
    rref.local_value().append(4)
    rref.to_here().append(5)
    # 5.0 is the to_here timeout in seconds — presumably chosen arbitrarily;
    # the point is to cover the timeout overload.
    rref.to_here(5.0).append(6)
53
+
54
+
55
def return_value(value: int) -> int:
    """Identity helper used as a remote RPC target."""
    result = value
    return result
57
+
58
+
59
class RRefAPITest:
    """Tests for the user-facing RRef API: ownership queries, local_value
    access rules, confirmation, and remote container mutation."""

    @dist_init
    def test_rref_is_owner(self):
        """A user RRef created on another worker reports is_owner() == False."""
        dst_worker_name = worker_name((self.rank + 1) % self.world_size)
        rref_var = rpc_return_rref(dst_worker_name)

        @torch.jit.script
        def rref_tensor_is_owner(rref_var: RRef[Tensor]) -> bool:
            return rref_var.is_owner()

        res = rref_tensor_is_owner(rref_var)
        self.assertEqual(res, False)

    @dist_init
    def test_rref_local_value(self):
        """local_value() raises on a non-owner; rpc to the owner succeeds."""
        if self.rank != 0:
            return

        dst_worker_name = worker_name((self.rank + 1) % self.world_size)
        rref = rpc_return_rref(dst_worker_name)

        with self.assertRaisesRegex(
            RuntimeError, r"Can't call RRef.local_value\(\) on a non-owner RRef"
        ):
            rref_local_value(rref)

        # Fix: the original read `ret = ret = rpc.rpc_sync(...)` — a
        # duplicated assignment target.
        ret = rpc.rpc_sync(dst_worker_name, rref_local_value, (rref,))
        self.assertEqual(ret, torch.add(torch.ones(2, 2), 1))

    @dist_init
    def test_local_rref_local_value(self):
        """local_value() works directly when this worker owns the RRef."""
        if self.rank != 0:
            return

        dst_worker_name = worker_name(self.rank)
        rref = rpc.remote(dst_worker_name, return_value, (5,), {})

        ret = rref_local_value(rref)
        self.assertEqual(ret, 5)

    def _create_rref(self):
        # Owner is two ranks away so the RRef is a genuine user RRef that must
        # be forwarded and confirmed across workers.
        owner_rank = (self.rank + 2) % self.world_size
        return rpc.remote(
            worker_name(owner_rank), torch.add, args=(torch.zeros(2, 2), 1)
        )

    @dist_init
    def test_user_rrefs_confirmed(self):
        dst_rank = (self.rank + 1) % self.world_size
        rref = self._create_rref()
        ret = rpc.rpc_sync(
            worker_name(dst_rank), script_check_rref_confirmed, args=(rref,)
        )
        self.assertEqual(ret, True)

    @dist_init
    def test_user_rrefs_confirmed_remote(self):
        dst_rank = (self.rank + 1) % self.world_size
        rref = self._create_rref()
        ret_rref = rpc.remote(
            worker_name(dst_rank), script_check_rref_confirmed, args=(rref,)
        )
        self.assertEqual(ret_rref.to_here(), True)

    @dist_init
    def test_rref_list_mutate(self):
        """Remote mutation through an RRef<List[int]> is visible to to_here()."""
        dst = worker_name((self.rank + 1) % self.world_size)
        list_rref = rpc.remote(dst, list_create)

        rpc.rpc_sync(dst, rref_list_mutate, args=(list_rref,))
        self.assertEqual(list_rref.to_here(), [1, 2, 3, 4, 5, 6])
130
+
131
+
132
@torch.jit.script
def no_arg():
    """Zero-argument scripted callee; always returns 0."""
    zero = 0
    return zero
135
+
136
+
137
@torch.jit.script
def one_arg(value):
    """Return `value` incremented by one."""
    bumped = value + 1
    return bumped
140
+
141
@torch.jit.script
def script_add_ones(x):
    """Add a size-1 ones tensor to `x` (broadcasts across x's shape)."""
    ones = torch.ones(1)
    return torch.add(x, ones)
144
+
145
@torch.jit.script
def script_add_ones_with_record_function(x, block: str):
    """Same as script_add_ones, but wraps the add in a profiler
    record_function scope named `block`, so the op is attributed to that
    label in profiler output."""
    with record_function(block):
        return torch.add(x, torch.ones(1))
149
+
150
+
151
@torch.jit.script
def record_function_on_caller_rpc_async(dst_worker_name: str, block: str) -> Tensor:
    """Issue two rpc_async calls to `dst_worker_name` inside a caller-side
    record_function scope named `block`, returning the combined result."""
    t: Tensor = torch.ones(1)
    with record_function(block) as rf:
        fut1 = rpc.rpc_async(dst_worker_name, script_add_ones, (t, ))
        # Extra operator call to avoid de-duplication of the next async call
        # see https://github.com/pytorch/pytorch/pull/62710#discussion_r694680279
        zero = torch.zeros_like(t)
        fut2 = rpc.rpc_async(dst_worker_name, script_add_ones, (t, ))
        res = fut1.wait() + fut2.wait() + zero
    return res
162
+
163
+
164
+
165
@torch.jit.script
def script_fork_wait_udf(tensor):
    """Run script_add_ones on a JIT fork thread and wait for its result."""
    pending = torch.jit._fork(script_add_ones, tensor)
    result = torch.jit._wait(pending)
    return result
170
+
171
+
172
@torch.jit.script
def rref_to_here(rref_var: RRef[Tensor]) -> Tensor:
    """Fetch the tensor held by `rref_var` to the calling worker."""
    return rref_var.to_here()
175
+
176
+
177
@torch.jit.script
def return_rref(rref_var: RRef[Tensor]) -> RRef[Tensor]:
    """Identity on an RRef — exercises passing an RRef through a script RPC."""
    return rref_var
180
+
181
+
182
@torch.jit.script
def script_raise_func(value):
    """Return value + 1, except raise ValueError for inputs with exactly
    two elements (used to test remote exception propagation)."""
    element_count = value.numel()
    if element_count == 2:
        raise ValueError("Expected error")
    return value + 1
187
+
188
+
189
@torch.jit.script
def script_fork_wait_throw(invalue):
    """Fork script_raise_func and wait; the wait re-raises any fork error."""
    pending = torch.jit._fork(script_raise_func, invalue)
    result = torch.jit._wait(pending)
    return result
194
+
195
+
196
@torch.jit.script
def call_rpc_with_profiling(record: torch.classes.profiler._RecordFunction, dst_worker_name: str) -> Tensor:
    # Call rpc_async from within ScriptFunction and ensure that we can attach
    # profiling callbacks. `record` is the custom-class handle for the active
    # RecordFunction; end callbacks fire when the future completes.
    fut = rpc.rpc_async(dst_worker_name, one_arg, (torch.tensor(1),))
    torch.ops.profiler._call_end_callbacks_on_jit_fut(record, fut)
    ret = fut.wait()
    return ret
205
+
206
@torch.jit.script
def call_rpc_torchscript_with_record_function(dst_worker_name: str, block: str) -> Tensor:
    """Invoke the callee-side record_function variant over RPC and wait,
    so the remote profiling scope `block` appears in the caller's profile."""
    fut = rpc.rpc_async(dst_worker_name, script_add_ones_with_record_function, (torch.tensor(1), block))
    return fut.wait()
210
+
211
+
212
@torch.jit.script
def call_fork_with_profiling(record: torch.classes.profiler._RecordFunction) -> Tensor:
    # Call fork from within ScriptFunction and ensure that we can attach profiling
    # callbacks to the resulting future. `record` is the custom-class handle
    # for the active RecordFunction.
    fut = torch.jit._fork(one_arg, torch.tensor(1))
    torch.ops.profiler._call_end_callbacks_on_jit_fut(record, fut)
    ret = fut.wait()
    return ret
221
+
222
+
223
class MyScriptModuleWithRRefs(torch.jit.ScriptModule):
    """ScriptModule holding four RRefs (each to ones(2, 2) + 1 on
    `dst_worker`); forward() fetches and sums them."""

    def __init__(self, dst_worker):
        super().__init__()
        self.rrefs = []
        for _ in range(4):
            self.rrefs.append(rpc_return_rref(dst_worker))

    @torch.jit.script_method
    def forward(self) -> Tensor:
        # ones(2, 2) + 4 * (ones(2, 2) + 1) == 9 * ones(2, 2)
        res_tensor = torch.ones(2, 2)
        for rref in self.rrefs:
            res_tensor += rref.to_here()

        return res_tensor
237
+
238
+
239
@torch.jit.ignore
def rref_python_annotation(rref_var: RRef[Tensor]) -> RRef[Tensor]:
    """Python-only identity; @torch.jit.ignore keeps it callable from script
    while exercising the RRef type annotation across the boundary."""
    return rref_var
242
+
243
+
244
@torch.jit.script
def rref_script_annotation(rref_var: RRef[Tensor]) -> Tensor:
    """Round-trip an RRef through an ignored Python function, then fetch it."""
    return rref_python_annotation(rref_var).to_here()
247
+
248
+
249
class RRefTypingTest:
    """Tests that RRef[T] round-trips correctly as an argument and return
    value across rpc_sync / remote, and through annotated Python code."""

    @dist_init
    def test_rref_as_arg_and_return(self):
        n = self.rank + 1
        dst_rank = n % self.world_size
        local_ret = one_arg(torch.ones(2, 2))

        # create rref on current rank
        rref = rpc.remote(worker_name(self.rank), one_arg, args=(torch.ones(2, 2),))

        # pass rref to another user in rpc call
        ret = rpc.rpc_sync(worker_name(dst_rank), rref_to_here, args=(rref,))
        self.assertEqual(ret, local_ret)

        # return rref in rpc call
        rref1 = rpc.rpc_sync(worker_name(dst_rank), return_rref, args=(rref,))
        self.assertEqual(rref1.to_here(), local_ret)

        # pass rref to another user in remote call
        rref2 = rpc.remote(worker_name(dst_rank), rref_to_here, args=(rref,))
        self.assertEqual(rref2.to_here(), local_ret)

        # return rref in remote call; outer to_here() yields the inner RRef
        rref3 = rpc.remote(worker_name(dst_rank), return_rref, args=(rref,))
        self.assertEqual(rref3.to_here().to_here(), local_ret)

    @dist_init
    def test_my_script_module_with_rrefs(self):
        n = self.rank + 1
        dst_rank = n % self.world_size

        module_with_rrefs = MyScriptModuleWithRRefs(worker_name(dst_rank))
        res = module_with_rrefs()
        # ones + 4 * (ones + 1) == 9 * ones
        self.assertEqual(res, torch.ones(2, 2) * 9)

    @dist_init
    def test_rref_python_annotation(self):
        n = self.rank + 1
        dst_rank = n % self.world_size
        rref_var = rpc_return_rref(worker_name(dst_rank))

        res = rref_script_annotation(rref_var)
        self.assertEqual(res, torch.ones(2, 2) + 1)
292
+
293
+
294
class FutureTypingTest:
    """Tests that Future[T] objects cross the Python <-> TorchScript
    boundary in both directions."""

    @dist_init
    def test_future_passed_between_python_and_jit(self):
        dst_rank = (self.rank + 1) % self.world_size
        inputs = (torch.tensor([1, 1]), torch.tensor([2, 2]))
        ret_fut = rpc.rpc_async(worker_name(dst_rank), two_args_two_kwargs, args=inputs)
        # [1,1] + [2,2] + default [3,3] + default [4,4] == [10,10]
        expected_res = torch.tensor([10, 10])

        # Python-created future waited on inside script.
        @torch.jit.script
        def future_wait_in_script(fut: Future[Tensor]) -> Tensor:
            return fut.wait()

        self.assertEqual(future_wait_in_script(ret_fut), expected_res)

        # Script-created future returned to and waited on in Python.
        @torch.jit.script
        def future_return_to_python(
            dst_rank: int, inputs: Tuple[Tensor, Tensor]
        ) -> Future[Tensor]:
            return rpc.rpc_async(
                f"worker{dst_rank}", two_args_two_kwargs, inputs
            )

        fut_res = future_return_to_python(dst_rank, inputs)
        self.assertEqual(fut_res.wait(), expected_res)

    @dist_init
    def test_future_python_annotation(self):
        if self.rank != 0:
            return

        dst_worker_name = worker_name((self.rank + 1) % self.world_size)
        input_0 = torch.ones(2, 2)
        input_1 = 1
        expected_res = torch.add(input_0, input_1)

        # Ignored Python function returning a Future, consumed from script.
        @torch.jit.ignore
        def python_return_future() -> Future[Tensor]:
            fut = rpc.rpc_async(dst_worker_name, torch.add, (input_0, input_1), {})
            return fut

        @torch.jit.script
        def script_use_future() -> Tensor:
            fut = python_return_future()
            return fut.wait()

        res = script_use_future()
        self.assertEqual(res, expected_res)
341
+
342
+
343
@torch.jit.script
class MyScriptClass:
    """Minimal TorchScript class wrapping a single int value."""

    def __init__(self, a: int):
        self.a = a

    def get_value(self) -> int:
        """Return the wrapped value."""
        return self.a
350
+
351
+
352
@torch.jit.interface
class MyModuleInterface(torch.nn.Module):
    """Module interface used as the RRef type hint for ScriptModules."""

    def forward(self) -> Tensor:
        # pyre-ignore[7]: Pyre and torch.jit.interface don't mix well
        pass
357
+
358
+
359
class MyScriptModule(torch.jit.ScriptModule):
    """ScriptModule whose forward returns a ones tensor of length `rank`."""

    def __init__(self, rank):
        super().__init__()
        self.a = torch.ones(rank)

    @torch.jit.script_method
    def forward(self) -> Tensor:
        return self.a

    @torch.jit.script_method
    def custom_func(self) -> Tensor:
        # Same as forward; exists to test calling a non-forward script method.
        return self.a
371
+
372
+
373
def owner_create_rref_my_script_class(a):
    """Create, on the owner, a local RRef holding a MyScriptClass(a)."""
    return rpc.RRef(MyScriptClass(a))
375
+
376
+
377
def owner_create_rref_my_script_module(a):
    """Create a local RRef holding a MyScriptModule(a); ScriptModule RRefs
    require an explicit module-interface type hint."""
    return rpc.RRef(MyScriptModule(a), type_hint=MyModuleInterface)
379
+
380
+
381
@torch.jit.script
def script_rref_get_value_my_script_class(rref: RRef[MyScriptClass]) -> int:
    """Fetch the MyScriptClass held by `rref` and return its value."""
    return rref.to_here().get_value()
384
+
385
+
386
@torch.jit.script
def script_rref_run_forward_my_script_module(rref: RRef[MyModuleInterface]) -> Tensor:
    """Fetch the module held by `rref` and run its forward()."""
    return rref.to_here().forward()
389
+
390
+
391
class LocalRRefTest:
    """Tests for locally-created RRefs (rpc.RRef(...)) holding TorchScript
    classes and ScriptModules, used from both Python and script."""

    @dist_init
    def test_create_local_script_class_rref_in_py(self):
        if self.rank != 0:
            return

        # Create a local RRef<MyScriptClass>.
        rref_script_class = rpc.RRef(MyScriptClass(self.rank))
        ret = rref_script_class.to_here().get_value()
        self.assertEqual(ret, self.rank)

    @dist_init
    def test_create_local_script_module_rref_in_py(self):
        if self.rank != 0:
            return

        # Create a local RRef<MyModuleInterface>.
        rref_script_module = rpc.RRef(MyScriptModule(self.rank), MyModuleInterface)
        ret = rref_script_module.to_here().forward()
        self.assertEqual(ret, torch.ones(self.rank))

        # Create a local RRef<MyModuleInterface> without type hint.
        with self.assertRaisesRegex(
            RuntimeError,
            (
                "The RRef being created contains a ScriptModule, "
                "must provide its ModuleInterface type hint."
            ),
        ):
            rref_script_module = rpc.RRef(MyScriptModule(self.rank))

    @dist_init
    def test_return_local_script_class_rref_in_py_and_use_in_script(self):
        if self.rank != 0:
            return

        dst_worker_name = worker_name((self.rank + 1) % self.world_size)

        # Create a local RRef<MyScriptClass> remotely in Python.
        rref = rpc.rpc_sync(
            dst_worker_name, owner_create_rref_my_script_class, args=(self.rank,)
        )

        def use_rref_on_owner(rref: RRef[MyScriptClass]) -> int:
            args = (rref,)
            kwargs: Dict[str, Any] = {}
            fut = rpc.rpc_async(
                rref.owner(), script_rref_get_value_my_script_class, args, kwargs
            )
            ret = fut.wait()
            return ret

        # Use RRef<MyScriptClass> in local Python RPC and remote Script run.
        ret = use_rref_on_owner(rref)
        self.assertEqual(ret, self.rank)

        # Use RRef<MyScriptClass> in local Script RPC and remote Script run.
        use_rref_on_owner_script = torch.jit.script(use_rref_on_owner)
        ret = use_rref_on_owner_script(rref)
        self.assertEqual(ret, self.rank)

    @dist_init
    def test_return_local_script_module_rref_in_py_and_use_in_script(self):
        if self.rank != 0:
            return

        dst_worker_name = worker_name((self.rank + 1) % self.world_size)

        # Create a local RRef<MyModuleInterface> remotely in Python.
        rref = rpc.rpc_sync(
            dst_worker_name, owner_create_rref_my_script_module, args=(self.rank,)
        )

        def use_rref_on_owner(rref: RRef[MyModuleInterface]) -> Tensor:
            args = (rref,)
            kwargs: Dict[str, Any] = {}
            # NOTE: addresses the owner by name (owner_name()) rather than by
            # WorkerInfo (owner()) — covers the other rpc_async "to" overload.
            fut = rpc.rpc_async(
                rref.owner_name(),
                script_rref_run_forward_my_script_module,
                args,
                kwargs,
            )
            ret = fut.wait()
            return ret

        # Use RRef<MyScriptClass> in local Python RPC and remote Script run.
        ret = use_rref_on_owner(rref)
        self.assertEqual(ret, torch.ones(self.rank))

        # Use RRef<MyScriptClass> in local Script RPC and remote Script run.
        use_rref_on_owner_script = torch.jit.script(use_rref_on_owner)
        ret = use_rref_on_owner_script(rref)
        self.assertEqual(ret, torch.ones(self.rank))
484
+
485
+
486
def python_function():
    """Plain Python callee: not scriptable, so calling it from TorchScript
    over RPC must fail — always returns 0 otherwise."""
    result = 0
    return result
488
+
489
+
490
@torch.jit.script
def two_args_two_kwargs(
    first_arg,
    second_arg,
    first_kwarg=torch.tensor([3, 3]),
    second_kwarg=torch.tensor([4, 4]),
):
    """Sum of two positional tensors and two kwarg tensors
    (defaults [3, 3] and [4, 4])."""
    total = first_arg + second_arg
    total = total + first_kwarg
    total = total + second_kwarg
    return total
498
+
499
+
500
@torch.jit.script
def assorted_types_args_kwargs(
    tensor_arg: Tensor,  # noqa: E999
    str_arg: str,
    int_arg: int,
    tensor_kwarg: Tensor = torch.tensor([2, 2]),
    str_kwarg: str = "str_kwarg",
    int_kwarg: int = 2,
):
    """Combine each positional arg with its same-typed kwarg; returns
    (tensor sum, concatenated string, int sum)."""
    tensor_sum = tensor_arg + tensor_kwarg
    joined = str_arg + str_kwarg
    int_sum = int_arg + int_kwarg
    return tensor_sum, joined, int_sum
510
+
511
+
512
@torch.jit.script
def raise_script():
    """Scripted callee that always raises; callers assert on the exact
    "Expected error" message, so it must not change."""
    raise RuntimeError("Expected error")
515
+
516
+
517
@torch.jit.script
def script_rpc_async_call(
    dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor]
):
    """rpc_async two_args_two_kwargs to `dst_worker_name` and wait."""
    fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
    ret = fut.wait()
    return ret
524
+
525
@torch.jit.script
def script_rpc_sync_call(
    dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor]
):
    """rpc_sync two_args_two_kwargs to `dst_worker_name`."""
    res = rpc.rpc_sync(dst_worker_name, two_args_two_kwargs, args, kwargs)
    return res
531
+
532
@torch.jit.script
def script_rpc_remote_call(
    dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor]
):
    """rpc.remote two_args_two_kwargs to `dst_worker_name`, then fetch."""
    rref_res = rpc.remote(dst_worker_name, two_args_two_kwargs, args, kwargs)
    return rref_res.to_here()
538
+
539
class JitRpcOpTest:
    """Tests of argument/kwarg matching and error propagation for RPC calls
    issued from TorchScript (rpc_async / rpc_sync / rpc.remote)."""

    # Call functions remotely from Script.
    @dist_init
    def test_all_kwargs_are_populated_by_defaults(self):
        if self.rank != 0:
            return

        dst_worker_name = worker_name((self.rank + 1) % self.world_size)

        args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
        kwargs = {}

        # [1,1] + [2,2] + default [3,3] + default [4,4] == [10,10]
        for script_op in [script_rpc_async_call, script_rpc_sync_call, script_rpc_remote_call]:
            ret = script_op(
                dst_worker_name, args, kwargs
            )
            self.assertEqual(ret, torch.tensor([10, 10]))

    @dist_init
    def test_some_kwargs_are_populated_by_defaults(self):
        if self.rank != 0:
            return

        dst_worker_name = worker_name((self.rank + 1) % self.world_size)

        args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
        kwargs = {"first_kwarg": torch.tensor([2, 2])}

        # first_kwarg overridden to [2,2]; second stays default [4,4] -> [9,9]
        for script_op in [script_rpc_async_call, script_rpc_sync_call, script_rpc_remote_call]:
            ret = script_op(
                dst_worker_name, args, kwargs
            )
            self.assertEqual(ret, torch.tensor([9, 9]))

    @dist_init
    def test_no_kwargs_are_populated_by_defaults(self):
        if self.rank != 0:
            return

        dst_worker_name = worker_name((self.rank + 1) % self.world_size)

        args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
        kwargs = {
            "first_kwarg": torch.tensor([2, 2]),
            "second_kwarg": torch.tensor([3, 3]),
        }
        # Both kwargs overridden -> [1,1]+[2,2]+[2,2]+[3,3] == [8,8]
        for script_op in [script_rpc_async_call, script_rpc_sync_call, script_rpc_remote_call]:
            ret = script_op(
                dst_worker_name, args, kwargs
            )
            self.assertEqual(ret, torch.tensor([8, 8]))

    @dist_init
    def test_args_and_kwargs_contain_different_types(self):
        if self.rank != 0:
            return

        dst_worker_name = worker_name((self.rank + 1) % self.world_size)

        @torch.jit.script
        def script_rpc_async_call_with_assorted_types(
            dst_worker_name: str,
        ):
            args = (torch.tensor([1, 1]), "str_arg", 1)
            # Must annotate the value type as `Any`, because JIT type inference
            # does not support multiple types when defining a Dict.
            # The error JIT gives is,
            # "Dict values must contain only a single type, "
            # "expected: Tensor but found str instead."
            kwargs: Dict[str, Any] = {
                "tensor_kwarg": torch.tensor([3, 3]),
                "str_kwarg": "_str_kwarg",
                "int_kwarg": 3,
            }
            fut = rpc.rpc_async(
                dst_worker_name, assorted_types_args_kwargs, args, kwargs
            )
            ret = fut.wait()
            return ret

        ret = script_rpc_async_call_with_assorted_types(
            dst_worker_name
        )
        self.assertEqual(ret, (torch.tensor([4, 4]), "str_arg_str_kwarg", 4))

    @dist_init
    def test_kwargs_not_passed(self):
        if self.rank != 0:
            return

        dst_worker_name = worker_name((self.rank + 1) % self.world_size)

        @torch.jit.script
        def script_rpc_async_call_without_kwargs_passed(
            dst_worker_name: str,
        ):
            args = ()
            fut = rpc.rpc_async(dst_worker_name, no_arg, args)
            ret = fut.wait()
            return ret

        ret = script_rpc_async_call_without_kwargs_passed(
            dst_worker_name
        )
        self.assertEqual(ret, 0)

    @dist_init
    def test_args_kwargs_are_neither_passed(self):
        if self.rank != 0:
            return

        dst_worker_name = worker_name((self.rank + 1) % self.world_size)

        @torch.jit.script
        def script_rpc_async_call_without_args_kwargs_passed(
            dst_worker_name: str,
        ):
            fut = rpc.rpc_async(dst_worker_name, no_arg)
            ret = fut.wait()
            return ret

        ret = script_rpc_async_call_without_args_kwargs_passed(
            dst_worker_name
        )
        self.assertEqual(ret, 0)

    @dist_init
    def test_less_than_needed_args_are_specified(self):
        if self.rank != 0:
            return

        dst_worker_name = worker_name((self.rank + 1) % self.world_size)

        # Notice, args matching happens during scripting.
        with self.assertRaisesRegex(RuntimeError, "Argument second_arg not provided"):

            @torch.jit.script
            def script_rpc_async_call_with_less_args(
                dst_worker_name: str,  # noqa: E999
            ):
                args = (torch.tensor([1, 1]),)
                kwargs = {}
                fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
                ret = fut.wait()
                return ret

    @dist_init
    def test_more_than_needed_args_are_specified(self):
        if self.rank != 0:
            return

        dst_worker_name = worker_name((self.rank + 1) % self.world_size)

        # Notice, args matching happens during scripting.
        with self.assertRaisesRegex(
            RuntimeError,
            "Expected at most 4 arguments but found 5 positional arguments",
        ):

            @torch.jit.script
            def script_rpc_async_call_with_more_args(
                dst_worker_name: str,
            ):
                args = (
                    torch.tensor([1, 1]),
                    torch.tensor([2, 2]),
                    torch.tensor([3, 3]),
                    torch.tensor([4, 4]),
                    torch.tensor([5, 5]),
                )
                kwargs = {}
                fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
                ret = fut.wait()
                return ret

    @dist_init
    def test_unexepected_kwarg_is_specified(self):
        # NOTE(review): "unexepected" is a typo for "unexpected"; kept because
        # the method name is the test-discovery interface.
        if self.rank != 0:
            return

        dst_worker_name = worker_name((self.rank + 1) % self.world_size)

        # Notice, kwargs matching happens during execution.
        @torch.jit.script
        def script_rpc_async_call_with_unexpected_kwarg(
            dst_worker_name: str,  # noqa: E999
        ):
            args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
            kwargs = {"third_kwarg": torch.tensor([1, 1])}
            fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
            ret = fut.wait()
            return ret

        with self.assertRaisesRegex(
            RuntimeError, "Unknown keyword argument 'third_kwarg'"
        ):
            ret = script_rpc_async_call_with_unexpected_kwarg(
                dst_worker_name
            )
            self.assertEqual(ret, 0)

    @dist_init
    def test_call_python_function_remotely_from_script_not_supported(self):
        if self.rank != 0:
            return

        dst_worker_name = worker_name((self.rank + 1) % self.world_size)

        @torch.jit.script
        def rpc_async_call_remote_py_function_in_torchscript(dst_worker_name: str):
            args = ()
            kwargs = {}
            fut = rpc.rpc_async(dst_worker_name, python_function, args, kwargs)
            ret = fut.wait()
            return ret

        with self.assertRaisesRegex(
            RuntimeError, "attempted to get undefined function"
        ):
            ret = rpc_async_call_remote_py_function_in_torchscript(dst_worker_name)
            self.assertEqual(ret, 0)

    @dist_init
    def test_call_script_function_that_raises_remotely_from_script(self):
        if self.rank != 0:
            return

        dst_worker_name = worker_name((self.rank + 1) % self.world_size)

        # Notice, TorchScript always translates(emits) Python `raise` statement,
        # as the exception message string, "Exception",
        # no matter what exception type and exception message are in the statement,
        @torch.jit.script
        def rpc_async_call_remote_raising_torchscript_in_torchscript(
            dst_worker_name: str,
        ):
            args = ()
            kwargs = {}
            fut = rpc.rpc_async(dst_worker_name, raise_script, args, kwargs)
            ret = fut.wait()
            return ret

        with self.assertRaisesRegex(RuntimeError, "Expected error"):
            ret = rpc_async_call_remote_raising_torchscript_in_torchscript(
                dst_worker_name
            )
            self.assertEqual(ret, 0)

    @dist_init
    def test_call_script_function_that_not_exists_remotely_from_script(self):
        if self.rank != 0:
            return

        dst_worker_name = worker_name((self.rank + 1) % self.world_size)

        @torch.jit.script
        def nonexisting_script():
            return 0

        @torch.jit.script
        def rpc_async_call_remote_nonexisting_torchscript_in_torchscript(
            dst_worker_name: str,
        ):
            args = ()
            kwargs = {}
            fut = rpc.rpc_async(dst_worker_name, nonexisting_script, args, kwargs)
            ret = fut.wait()
            return ret

        with self.assertRaisesRegex(
            RuntimeError, "attempted to get undefined function nonexisting_script"
        ):
            ret = rpc_async_call_remote_nonexisting_torchscript_in_torchscript(
                dst_worker_name
            )
            self.assertEqual(ret, 0)
815
+
816
+
817
@torch.jit.ignore
def my_script_module_init(rank: int) -> MyModuleInterface:
    """Construct a MyScriptModule in eager Python.

    @torch.jit.ignore keeps the constructor out of TorchScript compilation
    while still letting scripted code call it (the interface return type
    makes the call site checkable).
    """
    return MyScriptModule(rank)
820
+
821
+
822
@torch.jit.script
def construct_my_script_module(rank: int) -> MyModuleInterface:
    """Scripted entry point that builds a MyScriptModule via the ignored
    Python helper; used as an rpc.remote target so a module can be
    constructed on another worker."""
    return my_script_module_init(rank)
825
+
826
+
827
@torch.jit.script
def run_ref_script_module(
    ref_script_module: RRef[MyModuleInterface], t: Tensor
) -> Tensor:
    """Materialize the remote module behind the RRef on this worker, run its
    forward(), and add tensor `t` to the result."""
    module = ref_script_module.to_here()
    return module.forward() + t
833
+
834
+
835
@torch.jit.script
def script_check_rref_confirmed(rref: RRef[Tensor]) -> bool:
    """Return whether the owner has confirmed this RRef (scripted wrapper
    around RRef.confirmed_by_owner)."""
    return rref.confirmed_by_owner()
838
+
839
+
840
@torch.jit.script
def save_rref(rref_var: RRef[Tensor], fname: str) -> None:
    """Attempt to torch.save an RRef from TorchScript; RRef pickling is only
    permitted inside RPC calls, so callers use this to assert the failure."""
    torch.save(rref_var, fname)
843
+
844
+
845
@torch.jit.script
def script_add(x: Tensor, y: Tensor) -> Tensor:
    """Return the elementwise sum of two tensors (TorchScript-compiled)."""
    total = torch.add(x, y)
    return total
848
+
849
+
850
@rpc.functions.async_execution
@torch.jit.script
def async_add(to: str, x: Tensor, y: Tensor) -> Future[Tensor]:
    """Async-execution RPC target: forwards the add to worker `to` and returns
    the Future; the RPC framework responds when the Future completes instead
    of blocking a server thread."""
    return rpc.rpc_async(to, script_add, (x, y))
854
+
855
+
856
@rpc.functions.async_execution
@torch.jit.script
def async_wrong_type() -> Tensor:
    """Deliberately broken async-execution target: returns a Tensor instead of
    a Future, so callers can assert the framework's type error."""
    return torch.zeros(2)
860
+
861
+
862
def load_script_module_with_pickled_rref(pickled_script_module):
    """Deserialize torch.jit.save'd bytes into a ScriptModule and invoke it.

    Executed on the remote side of an RPC; the payload may embed RRefs that
    are reconstructed during torch.jit.load.
    """
    buffer = io.BytesIO(pickled_script_module)
    loaded_module = torch.jit.load(buffer)
    return loaded_module()
866
+
867
+
868
class JitRpcTest(
    RRefAPITest,
    RRefTypingTest,
    LocalRRefTest,
    JitRpcOpTest,
    FutureTypingTest,
    RpcAgentTestFixture,
):
    """End-to-end tests for TorchScript interoperability with the RPC API:
    calling script functions over rpc_sync/rpc_async/remote, ScriptModule
    construction and RRef transfer, future callbacks, profiling of scripted
    RPCs, and @rpc.functions.async_execution semantics."""

    @dist_init
    def test_torchscript_function(self):
        """rpc_sync/rpc.remote on a script function return the same result as
        a local call, including a self-targeted remote."""
        dst_worker_name = worker_name((self.rank + 1) % self.world_size)
        local_ret = one_arg(torch.ones(2, 2))
        ret = rpc.rpc_sync(dst_worker_name, one_arg, args=(torch.ones(2, 2),))
        self.assertEqual(ret, local_ret)
        rref = rpc.remote(dst_worker_name, one_arg, args=(torch.ones(2, 2),))
        self.assertEqual(rref.to_here(), local_ret)
        # create rref to itself
        local_rref = rpc.remote(
            worker_name(self.rank), one_arg, args=(torch.ones(2, 2),)
        )
        self.assertEqual(local_rref.to_here(), local_ret)

    @dist_init
    def test_torchscript_function_exception(self):
        """Schema mismatch (too many args) raises on both rpc_sync and remote."""
        dst_worker_name = worker_name((self.rank + 1) % self.world_size)
        with self.assertRaisesRegex(RuntimeError, r"one_arg\(\) expected at most"):
            ret = rpc.rpc_sync(dst_worker_name, one_arg, args=(10, 20))

        with self.assertRaisesRegex(RuntimeError, r"one_arg\(\) expected at most"):
            rref = rpc.remote(dst_worker_name, one_arg, args=(10, 20))

    @dist_init
    def test_torchscript_functions_not_supported(self):
        """Script classes go through the Python call path, but bound
        ScriptModule methods are not picklable as RPC targets."""
        dst_worker_name = worker_name((self.rank + 1) % self.world_size)

        my_local_script_module = MyScriptModule(self.rank)

        # It is not thread safe to instantiate MyScriptModule in multiple threads,
        # wait for local MyScriptModule instantiation to finish,
        # otherwise it could instantiate MyScriptModule in parallel with
        # server thread in the below
        initialize_pg(self.file_init_method, self.rank, self.world_size)
        dist.barrier()

        # rpc_sync still accepts script class and run it in
        # the same code path as python call.
        ret = rpc.rpc_sync(dst_worker_name, MyScriptClass, args=(self.rank,))

        # rpc_sync does not accept script module method.
        # Python 3.5 and Python 3.6 throw different error message, the only
        # common word can be greped is "pickle".
        with self.assertRaisesRegex(TypeError, "pickle"):
            ret = rpc.rpc_async(
                dst_worker_name, my_local_script_module.forward, args=()
            )

    @dist_init
    def test_remote_script_module(self):
        """A ScriptModule RRef can be passed back to its owner, but forwarding
        it from the owner to a user is rejected."""
        # TODO, need more investigation
        # there is rref leak when shutting down, suspect it is because
        # ref as arg is passed to pybind boundary, and the ref is not garbage
        # collected by python when calling shutdown()
        import torch.distributed.rpc.api as api

        api._ignore_rref_leak = True

        local_ret = torch.ones(self.rank) + torch.ones(self.rank)

        n = self.rank + 1
        dst_rank = n % self.world_size
        remote_ref = rpc.remote(
            worker_name(dst_rank), construct_my_script_module, args=(self.rank,)
        )

        # pass rref arg to owner
        ret = rpc.rpc_sync(
            worker_name(dst_rank),
            run_ref_script_module,
            args=(remote_ref, torch.ones(self.rank)),
        )
        self.assertEqual(ret, local_ret)

        # pass rref arg to self/user
        with self.assertRaisesRegex(
            RuntimeError,
            "is an RRef to a ScriptModule. It can't be sent through RPC from owner,",
        ):
            ret = rpc.rpc_sync(
                worker_name(self.rank),
                run_ref_script_module,
                args=(remote_ref, torch.ones(self.rank)),
            )

    @dist_init
    def test_create_script_module_on_remote(self):
        """ScriptModules can be constructed remotely via rpc_sync and
        rpc.remote, invoked through rpc_sync() proxies, and fetched with
        to_here() while remaining ScriptModules."""
        dst_name = worker_name((self.rank + 1) % self.world_size)
        # Construct on remote end with rpc_sync
        created_script_module = rpc.rpc_sync(
            dst_name, MyScriptModule, args=(self.rank,)
        )
        # Forward should output a ones tensor of self.rank.
        self.assertTrue(isinstance(created_script_module, torch.jit.ScriptModule))
        rank_ones_tensor = created_script_module()
        self.assertEqual(torch.ones(self.rank), rank_ones_tensor)

        # Construct ScriptModule with rpc.remote.
        remote_script_module = rpc.remote(dst_name, MyScriptModule, args=(self.rank,))
        # Verify it is an instance of ScriptModule on remote end.
        remote_end_is_script = rpc.rpc_sync(
            remote_script_module.owner(),
            rref_isinstance,
            args=(remote_script_module, torch.jit.ScriptModule),
        )
        self.assertTrue(remote_end_is_script)
        # Run forward pass remotely.
        remote_forward_output = remote_script_module.rpc_sync().forward()
        self.assertEqual(remote_forward_output, torch.ones(self.rank))
        # Run function defined on ScriptModule remotely.
        remote_func_output = remote_script_module.rpc_sync().custom_func()
        self.assertEqual(remote_func_output, torch.ones(self.rank))
        # Ensure we can transfer ScriptModule RRef to this rank and run
        # forward pass.
        local_script_module = remote_script_module.to_here()
        self.assertTrue(isinstance(local_script_module, torch.jit.ScriptModule))
        rank_ones_tensor = local_script_module()
        self.assertEqual(rank_ones_tensor, torch.ones(self.rank))
        local_script_func_output = local_script_module.custom_func()
        self.assertEqual(local_script_func_output, torch.ones(self.rank))

    @dist_init
    def test_load_script_module_with_pickled_rref(self):
        """A module holding RRefs can be jit-saved (with RRef pickling
        enabled), shipped as bytes, loaded remotely, and produce the same
        output as a locally built twin."""
        dst_name = worker_name((self.rank + 1) % self.world_size)
        m1 = MyScriptModuleWithRRefs(dst_name)
        m2 = MyScriptModuleWithRRefs(dst_name)

        f = io.BytesIO()

        # RRef pickling is disallowed by default outside RPC; enable it just
        # for the save and restore the guard immediately after.
        rpc._enable_jit_rref_pickle()
        torch.jit.save(m1, f)
        rpc._disable_jit_rref_pickle()

        out1 = rpc.rpc_sync(
            dst_name,
            load_script_module_with_pickled_rref,
            args=(f.getvalue(),)
        )
        out2 = m2()
        self.assertEqual(out1, out2)

    @dist_init
    def test_rref_jit_pickle_not_supported(self):
        """torch.save of an RRef from TorchScript must fail outside RPC."""
        n = self.rank + 1
        dst_rank = n % self.world_size
        rref_var = rpc_return_rref(worker_name(dst_rank))
        with TemporaryFileName() as fname:
            with self.assertRaisesRegex(
                RuntimeError, "RRef jit pickling is only allowed inside RPC calls"
            ):
                save_rref(rref_var, fname)

    @dist_init
    def test_remote_script_throw(self):
        """An exception in a remote script function surfaces on to_here()."""
        rref = rpc.remote(
            worker_name((self.rank + 1) % self.world_size),
            script_raise_func,
            args=(torch.ones(2),),
        )
        with self.assertRaisesRegex(Exception, ".*Expected error.*"):
            rref.to_here()

    @dist_init
    def test_remote_script_udf(self):
        """rpc.remote on a script function that forks/waits internally."""
        rref = rpc.remote(
            worker_name((self.rank + 1) % self.world_size),
            script_fork_wait_udf,
            args=(torch.ones(2),),
        )
        self.assertEqual(rref.to_here(), torch.ones(2) * 2)

    @dist_init
    def test_async_script_udf(self):
        """rpc_async on a script function that forks/waits internally."""
        future = rpc.rpc_async(
            worker_name((self.rank + 1) % self.world_size),
            script_fork_wait_udf,
            args=(torch.ones(2),),
        )
        self.assertEqual(future.wait(), torch.ones(2) * 2)

    @dist_init
    def test_callback_simple(self):
        """Future.then() transforms the RPC result."""
        def callback(fut):
            return fut.wait() + 1

        future = rpc.rpc_async(
            worker_name((self.rank + 1) % self.world_size),
            script_fork_wait_udf,
            args=(torch.ones(2),),
        ).then(callback)
        self.assertEqual(future.wait(), torch.ones(2) * 2 + 1)

    @dist_init
    def test_callback_chain(self):
        """Chained then() callbacks each apply once, in order."""
        n = self.rank + 1
        dst = worker_name(n % self.world_size)

        def callback(fut):
            return fut.wait() + 1

        fut = rpc.rpc_async(
            worker_name(n % self.world_size), one_arg, args=(torch.ones(n, n),)
        )

        num_cbs = 20
        for _ in range(num_cbs):
            fut = fut.then(callback)

        self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs)

    @dist_init
    def test_add_done_callback(self):
        """add_done_callback fires with the completed future's value."""
        callback_called = None

        def callback(fut):
            nonlocal callback_called
            callback_called = fut.wait() * 2

        future = rpc.rpc_async(
            worker_name((self.rank + 1) % self.world_size),
            script_fork_wait_udf,
            args=(torch.ones(2),),
        )

        future.add_done_callback(callback)
        future_then = future.then(lambda _: True)

        self.assertEqual(future.wait(), torch.ones(2) * 2)

        # We have no guarantee that the add_done_callback fn will execute before the test finishes.
        # Adding a 'then' callback that runs afterwards to guarantee we wait for the first callback
        future_then.wait()
        self.assertEqual(callback_called, torch.ones(2) * 4)

    @dist_init
    def test_async_script_throw(self):
        """An exception in a forked remote script surfaces on future.wait()."""
        future = rpc.rpc_async(
            worker_name((self.rank + 1) % self.world_size),
            script_fork_wait_throw,
            args=(torch.ones(2),),
        )
        with self.assertRaisesRegex(Exception, ".*Expected error.*"):
            future.wait()

    @dist_init
    def test_callback_with_exception(self):
        """A then() callback sees the upstream error and may raise its own,
        which becomes the chained future's error."""
        def callback(fut):
            with self.assertRaisesRegex(Exception, ".*Expected error.*"):
                fut.wait()
            raise RuntimeError("Another expected error")

        future = rpc.rpc_async(
            worker_name((self.rank + 1) % self.world_size),
            script_fork_wait_throw,
            args=(torch.ones(2),),
        ).then(callback)

        with self.assertRaisesRegex(RuntimeError, "Another expected error"):
            future.wait()

    @dist_init
    def test_call_rpc_with_profiling(self):
        # Ensures that we can call torch.ops.profiler._call_end_callbacks_on_jit_fut on a jit
        # future from within a script function that calls rpc_async
        if self.rank == 0:
            with _profile() as prof:
                prof_key = _build_rpc_profiling_key(
                    RPCExecMode.ASYNC,
                    torch._jit_internal._qualified_name(one_arg),
                    "worker0",
                    "worker1",
                )
                with torch.autograd.profiler.record_function(prof_key) as rf:
                    ret = call_rpc_with_profiling(rf.record, "worker1")
            # TODO: Can't get a reliable time for this profiling event since
            # it's hard to estimate the execution time on the remote end for non-UDFs.
            # This can be resolved by https://github.com/pytorch/pytorch/issues/36272.
            # After that, this test should be modified to validate the function time.
            events = prof.function_events
            function_event = get_function_event(events, prof_key)
            self.assertTrue(torch._jit_internal._qualified_name(one_arg) in function_event.name)

    @dist_init
    def test_rpc_async_jit_profiled(self):
        # Tests that rpc_async calls made from within a TorchScript function are
        # profiled.
        if self.rank == 0:
            dst_rank = (self.rank + 1) % self.world_size
            dst_worker_name = worker_name(dst_rank)
            args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
            kwargs = {}
            with _profile() as prof:
                script_rpc_async_call(
                    dst_worker_name, args, kwargs
                )

            # Ensure rpc_async call is profiled
            function_events = prof.function_events
            qual_name = torch._jit_internal._qualified_name(two_args_two_kwargs)
            rpc_async_jit_event = [
                event
                for event in function_events
                if qual_name in event.name and event.node_id == self.rank
            ]
            self.assertEqual(len(rpc_async_jit_event), 1)
            rpc_async_jit_event = rpc_async_jit_event[0]
            profiled_name = _build_rpc_profiling_key(
                RPCExecMode.ASYNC_JIT,
                qual_name,
                worker_name(self.rank),
                dst_worker_name,
            )
            self.assertEqual(profiled_name, rpc_async_jit_event.name)
            remote_events = [event for event in function_events if event.is_remote]
            # All remote events should have taken place on dst_rank
            remote_event_node_ids = {
                remote_event.node_id for remote_event in remote_events
            }
            self.assertEqual(remote_event_node_ids, {dst_rank})
            # script_rpc_async_call invokes add operator
            # so we should see this as a remote event.
            remote_add = next(
                remote_event
                for remote_event in remote_events
                if "aten::add" in remote_event.name
            )
            remote_add_profiled_name = f"{profiled_name}#remote_op: aten::add"
            self.assertEqual(remote_add.name, remote_add_profiled_name)

    @dist_init
    def test_record_function_on_caller_rpc_async(self):
        """record_function around two in-script rpc_async calls produces one
        scope event enclosing both profiled RPC futures."""
        if self.rank == 0:
            dst_rank = (self.rank + 1) % self.world_size
            dst_worker_name = worker_name(dst_rank)
            block_scope = "foo"
            with _profile() as prof:
                # Runs 2 rpc_async calls within JIT under record_function.
                record_function_on_caller_rpc_async(dst_worker_name, block_scope)

            # Ensure record_function event is profiled.
            function_events = prof.function_events
            record_function_scope_event = [
                event for event in function_events if event.name == block_scope
            ]
            self.assertEqual(1, len(record_function_scope_event))
            record_function_scope_event = record_function_scope_event[0]
            # Ensure RPC future is profiled.
            expected_key = _build_rpc_profiling_key(
                RPCExecMode.ASYNC_JIT,
                torch._jit_internal._qualified_name(script_add_ones),
                worker_name(self.rank),
                dst_worker_name,
            )
            jit_rpc_events = [
                event for event in function_events if event.name == expected_key
            ]
            self.assertEqual(2, len(jit_rpc_events))
            # Validate that the record_function scope time is greater than both
            # of the individual RPC async call times. The reason it is not necessarily
            # greater than the sum is because the two can execute in parallel.
            for jit_rpc_event in jit_rpc_events:
                self.assertTrue(
                    record_function_scope_event.cpu_time_total
                    > jit_rpc_event.cpu_time_total
                )

    @dist_init
    def test_rpc_torchscript_record_function(self):
        # tests that torchscript functions can be profiled using with
        # record_function(...) over RPC.
        REMOTE_OP_STR = "#remote_op: "
        if self.rank == 0:
            dst_rank = (self.rank + 1) % self.world_size
            dst_worker_name = worker_name(dst_rank)
            block_scope = "foo"
            with _profile() as prof:
                call_rpc_torchscript_with_record_function(dst_worker_name, block_scope)

            # Need to call below to populate CPU children.
            prof.key_averages()
            function_events = prof.function_events
            expected_key = (
                _build_rpc_profiling_key(
                    RPCExecMode.ASYNC_JIT,
                    torch._jit_internal._qualified_name(
                        script_add_ones_with_record_function
                    ),
                    worker_name(self.rank),
                    dst_worker_name,
                )
                + REMOTE_OP_STR
                + block_scope
            )
            remote_record_function_event = next(
                evt for evt in function_events if evt.name == expected_key
            )
            self.assertTrue(block_scope in remote_record_function_event.name)
            remote_children = remote_record_function_event.cpu_children
            # BUG FIX: the original passed a generator expression directly to
            # assertTrue, which is always truthy and never actually checked the
            # children. Wrap in any() so the assertion is meaningful.
            self.assertTrue(
                any("aten::add" in child.name for child in remote_children)
            )

    def test_record_function_jit_end_callbacks_with_fork(self):
        # Ensures that we can call rf._call_end_callbacks_on_future on a jit
        # future in python eager mode with torch.jit.fork
        sleep_interval = 1
        with _profile() as prof:
            with torch.autograd.profiler.record_function("foo") as rf:
                fut = torch.jit._fork(sleep, sleep_interval)
                rf._call_end_callbacks_on_future(fut)
            fut.wait()

        function_events = prof.function_events
        sleep_event = get_function_event(function_events, "foo")
        self.assertEqual(sleep_event.name, "foo")
        # Validate that callbacks were fired at the right time by checking the
        # profiling event cpu time
        self.assertGreaterAlmostEqual(sleep_event.cpu_time * 1e-6, sleep_interval)

    def test_call_fork_in_jit_with_profiling(self):
        # Ensures that we can call torch.ops.profiler._call_end_callbacks_on_jit_fut on a jit
        # future from within a script function with torch.jit.fork
        with _profile() as prof:
            with torch.autograd.profiler.record_function("foo") as rf:
                ret = call_fork_with_profiling(rf.record)

        events = prof.function_events
        function_event = get_function_event(events, "foo")
        self.assertEqual(function_event.name, "foo")

    @dist_init
    def test_async_function_simple(self):
        """An async-execution script function resolves to the inner RPC's
        result when called via rpc_sync."""
        dst1 = worker_name((self.rank + 1) % self.world_size)
        dst2 = worker_name((self.rank + 2) % self.world_size)

        ret = rpc.rpc_sync(
            dst1, async_add, args=(dst2, torch.ones(2, 2), torch.ones(2, 2))
        )
        self.assertEqual(ret, torch.ones(2, 2) + 1)

    @dist_init
    def test_async_function_wrong_return_type(self):
        """Async-execution functions must return a Future; a Tensor return is
        rejected at call time."""
        with self.assertRaisesRegex(
            RuntimeError,
            "Async functions must return an IValue of Future type, but got Tensor",
        ):
            rpc.rpc_sync(
                worker_name((self.rank + 1) % self.world_size), async_wrong_type
            )

    @dist_init
    def test_async_function_wrong_decorator_order(self):
        # @torch.jit.script complains about undefined value rpc. Error is shown
        # below. The reason for not checking error string is to avoid making
        # JIT error handling code depend on RPC tests, as we don't have any
        # restrictions on the error message here.
        #
        # RuntimeError:
        # undefined value rpc:
        # def async_wrong_decorator_order(to, x, y):
        #    # type: (str, Tensor, Tensor) -> Future[Tensor]
        #    return rpc.rpc_async(to, script_add, (x, y))
        #           ~~~ <--- HERE
        with self.assertRaises(RuntimeError):

            @torch.jit.script
            @rpc.functions.async_execution
            def async_wrong_decorator_order(
                to: str, x: Tensor, y: Tensor
            ) -> Future[Tensor]:
                return rpc.rpc_async(to, script_add, (x, y))

    @dist_init
    def test_async_function_remote(self):
        """rpc.remote on an async-execution function yields the inner result."""
        dst1 = worker_name((self.rank + 1) % self.world_size)
        dst2 = worker_name((self.rank + 2) % self.world_size)

        rref = rpc.remote(
            dst1, async_add, args=(dst2, torch.ones(2, 2), torch.ones(2, 2))
        )
        self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1)

    @dist_init
    def test_async_function_remote_multi(self):
        """Many concurrent async-execution remotes each resolve correctly."""
        dst1 = worker_name((self.rank + 1) % self.world_size)
        dst2 = worker_name((self.rank + 2) % self.world_size)

        num = 20
        rrefs = []
        for i in range(num):
            rrefs.append(
                rpc.remote(
                    dst1, async_add, args=(dst2, torch.ones(2, 2), torch.ones(2, 2) * i)
                )
            )

        for i in range(num):
            self.assertEqual(rrefs[i].to_here(), torch.ones(2, 2) + i)

    @dist_init
    def test_async_function_wrong_return_type_remote(self):
        """The Future-return-type error also propagates through to_here()."""
        rref = rpc.remote(
            worker_name((self.rank + 1) % self.world_size), async_wrong_type
        )

        with self.assertRaisesRegex(
            RuntimeError,
            "Async functions must return an IValue of Future type, but got Tensor",
        ):
            rref.to_here()
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/distributed/rpc/jit/rpc_test_faulty.py ADDED
@@ -0,0 +1,216 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict, Tuple
2
+
3
+ import torch
4
+ import torch.distributed.rpc as rpc
5
+ from torch import Tensor
6
+ from torch.distributed.rpc import RRef
7
+ from torch.testing._internal.dist_utils import (
8
+ dist_init,
9
+ worker_name,
10
+ wait_until_pending_futures_and_users_flushed
11
+ )
12
+ from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
13
+ RpcAgentTestFixture,
14
+ )
15
+
16
+
17
+ @torch.jit.script
18
+ def two_args_two_kwargs(
19
+ first_arg,
20
+ second_arg,
21
+ first_kwarg=torch.tensor([3, 3]),
22
+ second_kwarg=torch.tensor([4, 4]),
23
+ ):
24
+ return first_arg + second_arg + first_kwarg + second_kwarg
25
+
26
+
27
+ @torch.jit.script
28
+ def script_rpc_async_call(
29
+ dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor]
30
+ ):
31
+ fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
32
+ ret = fut.wait()
33
+ return ret
34
+
35
+
36
+ @torch.jit.script
37
+ def rpc_async_call_with_timeout(
38
+ dst_worker_name: str,
39
+ args: Tuple[Tensor, Tensor],
40
+ kwargs: Dict[str, Tensor],
41
+ timeout: float,
42
+ ):
43
+ fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs, timeout)
44
+ ret = fut.wait()
45
+ return ret
46
+
47
+
48
+ @torch.jit.script
49
+ def rpc_async_call_with_timeout_future_ret(
50
+ dst_worker_name: str,
51
+ args: Tuple[Tensor, Tensor],
52
+ kwargs: Dict[str, Tensor],
53
+ timeout: float,
54
+ ):
55
+ fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs, timeout)
56
+ return fut
57
+
58
+
59
+ @torch.jit.script
60
+ def rpc_async_call_future_ret(
61
+ dst_worker_name: str, args: Tuple[Tensor, Tensor], kwargs: Dict[str, Tensor]
62
+ ):
63
+ fut = rpc.rpc_async(dst_worker_name, two_args_two_kwargs, args, kwargs)
64
+ return fut
65
+
66
+ @torch.jit.script
67
+ def rref_to_here(rref_var: RRef[Tensor]) -> Tensor:
68
+ return rref_var.to_here()
69
+
70
+ @torch.jit.script
71
+ def rref_to_here_with_timeout(rref_var: RRef[Tensor], timeout: float) -> Tensor:
72
+ return rref_var.to_here(timeout)
73
+
74
+ @torch.jit.script
75
+ def rpc_async_with_rref_arg(dst_worker_name: str, args: Tuple[RRef[Tensor]]) -> Tensor:
76
+ fut = rpc.rpc_async(dst_worker_name, rref_to_here, args)
77
+ ret = fut.wait()
78
+ return ret
79
+
80
+
81
+ class JitFaultyAgentRpcTest(RpcAgentTestFixture):
82
+ """
83
+ Run tests for rpc_async in JIT under the faulty agent test fixture to test
84
+ arbitrary timeouts.
85
+ """
86
+ @dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
87
+ def test_timeout_in_torchscript_function(self):
88
+ # Call rpc_async + fut.wait() in torchscript function and ensure that
89
+ # timeout is raised.
90
+ if self.rank != 0:
91
+ return
92
+
93
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
94
+
95
+ args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
96
+ kwargs = {
97
+ "first_kwarg": torch.tensor([2, 2]),
98
+ "second_kwarg": torch.tensor([3, 3]),
99
+ }
100
+ expected_error = self.get_timeout_error_regex()
101
+ # Ensure that we get a timeout if we override the default timeout and
102
+ # the RPC takes longer to execute.
103
+ with self.assertRaisesRegex(RuntimeError, expected_error):
104
+ rpc_async_call_with_timeout(dst_worker_name, args, kwargs, 0.5)
105
+
106
+ # Ensure that we timeout if we don't specify a timeout but the default
107
+ # is less than the RPC takes to execute.
108
+ rpc._set_rpc_timeout(0.001)
109
+ with self.assertRaisesRegex(RuntimeError, expected_error):
110
+ script_rpc_async_call(
111
+ dst_worker_name, args, kwargs
112
+ )
113
+
114
+ # Ensure that we run to completion if zero timeout is specified.
115
+ ret = rpc_async_call_with_timeout(dst_worker_name, args, kwargs, 0)
116
+ self.assertEqual(ret, torch.tensor([8, 8]))
117
+ # reset for clean shutdown
118
+ rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
119
+
120
+ @dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_CALL": 1.5})
121
+ def test_timeout_in_python(self):
122
+ # Ensures timeouts are raised if we call rpc_async from within a
123
+ # torchscript function, but wait on the future in python.
124
+ if self.rank != 0:
125
+ return
126
+
127
+ dst_worker_name = worker_name((self.rank + 1) % self.world_size)
128
+ args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
129
+ kwargs = {
130
+ "first_kwarg": torch.tensor([2, 2]),
131
+ "second_kwarg": torch.tensor([3, 3]),
132
+ }
133
+ expected_error = self.get_timeout_error_regex()
134
+
135
+ fut = rpc_async_call_with_timeout_future_ret(dst_worker_name, args, kwargs, 0.5)
136
+ with self.assertRaisesRegex(RuntimeError, expected_error):
137
+ fut.wait()
138
+
139
+ # Ensure timeout if we don't specify but the default is less than the
140
+ # RPC takes to execute.
141
+ rpc._set_rpc_timeout(0.001)
142
+ fut = rpc_async_call_future_ret(dst_worker_name, args, kwargs)
143
+ with self.assertRaisesRegex(RuntimeError, expected_error):
144
+ fut.wait()
145
+
146
+ # Ensure run to completion if zero timeout is specified
147
+ fut = rpc_async_call_with_timeout_future_ret(dst_worker_name, args, kwargs, 0)
148
+ result = fut.wait()
149
+ self.assertEqual(result, torch.tensor([8, 8]))
150
+ # reset for clean shutdown
151
+ rpc._set_rpc_timeout(rpc.constants.DEFAULT_RPC_TIMEOUT_SEC)
152
+
153
+ @dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
154
+ def test_remote_timeout_to_here_in_jit(self):
155
+ # Test that calling to_here() in JIT will raise timeout error if
156
+ # rpc.remote failed.
157
+ if self.rank != 0:
158
+ return
159
+ dst_rank = (self.rank + 1) % self.world_size
160
+ dst_worker = f"worker{dst_rank}"
161
+ rref = rpc.remote(
162
+ dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
163
+ )
164
+ # Will ensure error handling callbacks are run.
165
+ wait_until_pending_futures_and_users_flushed()
166
+ # Call to_here() within a ScriptFunction and ensure it raises
167
+ with self.assertRaisesRegex(RuntimeError, "RRef creation"):
168
+ rref_to_here(rref)
169
+
170
+ @dist_init(faulty_messages=[], messages_to_delay={"SCRIPT_RREF_FETCH_CALL": 1})
171
+ def test_rref_to_here_timeout_in_jit(self):
172
+ if self.rank != 0:
173
+ return
174
+
175
+ dst_rank = (self.rank + 1) % self.world_size
176
+ dst_worker = f"worker{dst_rank}"
177
+ rref = rpc.remote(
178
+ dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
179
+ )
180
+ expected_error = self.get_timeout_error_regex()
181
+ with self.assertRaisesRegex(RuntimeError, expected_error):
182
+ rref_to_here_with_timeout(rref, 0.01)
183
+
184
+ rref_to_here_with_timeout(rref, 100)
185
+
186
+ @dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
187
+ def test_rref_timeout_pickle_in_jit(self):
188
+ if self.rank != 0:
189
+ return
190
+ dst_rank = (self.rank + 1) % self.world_size
191
+ dst_worker = f"worker{dst_rank}"
192
+ rref = rpc.remote(
193
+ dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
194
+ )
195
+ # Will ensure error handling callbacks are run.
196
+ wait_until_pending_futures_and_users_flushed()
197
+ # Call RPC with RRef arg in JIT, which will go through JIT pickling and
198
+ # ensure error is raised.
199
+ with self.assertRaisesRegex(RuntimeError, "RRef creation"):
200
+ rpc_async_with_rref_arg(dst_worker, (rref, ))
201
+
202
+ @dist_init(faulty_messages=["SCRIPT_REMOTE_CALL"])
203
+ def test_rref_timeout_pickle_script_func(self):
204
+ # Similar to above test, but calls python rpc with script function.
205
+ if self.rank != 0:
206
+ return
207
+ dst_rank = (self.rank + 1) % self.world_size
208
+ dst_worker = f"worker{dst_rank}"
209
+ rref = rpc.remote(
210
+ dst_worker, torch.add, args=(torch.tensor(1), torch.tensor(1))
211
+ )
212
+ # Will ensure error handling callbacks are run.
213
+ wait_until_pending_futures_and_users_flushed()
214
+ # Call RPC with script function that takes RRef, ensure timeout during pickling
215
+ with self.assertRaisesRegex(RuntimeError, "RRef creation"):
216
+ rpc.rpc_sync(dst_worker, rref_to_here, args=(rref, ))
env-llmeval/lib/python3.10/site-packages/torch/testing/_internal/opinfo/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ import torch.testing._internal.opinfo.core
2
+ import torch.testing._internal.opinfo.definitions