applied-ai-018 committed on
Commit b2b1b13 · verified · 1 Parent(s): eeb0c04

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/__init__.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/common_types.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/functional.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/torch/nn/attention/__pycache__/__init__.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/torch/nn/attention/__pycache__/_utils.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/torch/nn/attention/__pycache__/bias.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/torch/nn/attention/_utils.py +57 -0
  8. llmeval-env/lib/python3.10/site-packages/torch/nn/attention/bias.py +353 -0
  9. llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/_functions.py +126 -0
  10. llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/comm.py +236 -0
  11. llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/data_parallel.py +269 -0
  12. llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/distributed.py +0 -0
  13. llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/parallel_apply.py +110 -0
  14. llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/replicate.py +186 -0
  15. llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/scatter_gather.py +107 -0
  16. llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/__init__.py +1 -0
  17. llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/__pycache__/__init__.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/__init__.py +9 -0
  19. llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/activation.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/rnn.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/activation.py +10 -0
  23. llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/rnn.py +11 -0
  24. llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/__init__.py +40 -0
  25. llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/__pycache__/__init__.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/__pycache__/functional.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/__init__.py +1 -0
  28. llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/__pycache__/__init__.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__init__.py +31 -0
  30. llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/conv.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/linear.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/rnn.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/sparse.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/utils.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/conv.py +19 -0
  37. llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/linear.py +12 -0
  38. llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/rnn.py +17 -0
  39. llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/sparse.py +13 -0
  40. llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/utils.py +15 -0
  41. llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/__init__.py +1 -0
  42. llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/__pycache__/__init__.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/linear.py +10 -0
  44. llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/functional.py +10 -0
  45. llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/activation.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__init__.py +32 -0
  47. llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_deprecation_utils.py +45 -0
  48. llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__init__.py +9 -0
  49. llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/conv_expanded_weights.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/embedding_expanded_weights.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.16 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/common_types.cpython-310.pyc ADDED
Binary file (1.03 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/functional.cpython-310.pyc ADDED
Binary file (177 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/nn/attention/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.12 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/nn/attention/__pycache__/_utils.cpython-310.pyc ADDED
Binary file (2.35 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/nn/attention/__pycache__/bias.cpython-310.pyc ADDED
Binary file (11.3 kB).
 
llmeval-env/lib/python3.10/site-packages/torch/nn/attention/_utils.py ADDED
@@ -0,0 +1,57 @@
1
+ """Defines utilities for interacting with scaled_dot_product_attention"""
2
+ import math
3
+ from typing import List, Optional
4
+
5
+ import torch
6
+
7
+ __all__: List[str] = []
8
+
9
+
10
+ def _input_requires_grad(*tensors: torch.Tensor) -> bool:
11
+ """Returns True if any of the tensors requires grad"""
12
+ return any(t.requires_grad for t in tensors)
13
+
14
+
15
+ def _postprocess_flash_output(inpt_tensor: torch.Tensor, og_size: int) -> torch.Tensor:
16
+ """Handles the unpad of the last dimension"""
17
+ if inpt_tensor.size(-1) != og_size:
18
+ return inpt_tensor[..., :og_size]
19
+ return inpt_tensor
20
+
21
+
22
+ def _calculate_scale(head_dim_size: int, scale: Optional[float]) -> float:
23
+ """
24
+ For FlashAttention we pad the head dimension to be a multiple of 8 so we need to scale the output
25
+ by the original head size and not the padded one.
26
+ """
27
+ if scale is not None:
28
+ return scale
29
+ return 1.0 / math.sqrt(head_dim_size)
30
+
31
+
32
+ def _validate_sdpa_input(
33
+ query: torch.Tensor,
34
+ key: torch.Tensor,
35
+ value: torch.Tensor,
36
+ attn_mask: Optional[torch.Tensor] = None,
37
+ dropout_p=0.0,
38
+ is_causal=False,
39
+ scale=None,
40
+ ):
41
+ if query.dtype != key.dtype or query.dtype != value.dtype:
42
+ raise ValueError(
43
+ f"Expected query, key, and value to have the same dtype, "
44
+ f"but got query.dtype: {query.dtype}, key.dtype: {key.dtype}, "
45
+ f"and value.dtype: {value.dtype} instead."
46
+ )
47
+ if query.device != key.device or query.device != value.device:
48
+ raise ValueError(
49
+ f"Expected query, key, and value to have the same device type, "
50
+ f"but got query.device: {query.device}, key.device: {key.device}, "
51
+ f"and value.device: {value.device} instead."
52
+ )
53
+ if query.dim() < 2 or key.dim() < 2 or value.dim() < 2:
54
+ raise ValueError(
55
+ f"Expected query, key, and value to all be at least 2 dimensional, but got query.dim: "
56
+ f"{query.dim()}, key.dim: {key.dim()} and value.dim: {value.dim()} instead."
57
+ )
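The helpers above are private but easy to exercise directly. A minimal sketch (not part of this commit; shapes and values are illustrative only) of how `_validate_sdpa_input` and `_calculate_scale` behave:

    import math
    import torch
    from torch.nn.attention._utils import _calculate_scale, _validate_sdpa_input

    # (batch, heads, seq_len, head_dim); any tensors with >= 2 dims and matching dtype/device pass validation
    q = torch.randn(2, 4, 8, 16)
    k = torch.randn(2, 4, 8, 16)
    v = torch.randn(2, 4, 8, 16)
    _validate_sdpa_input(q, k, v)          # raises ValueError on dtype/device/dim mismatch

    # With scale=None the default 1/sqrt(head_dim) is used; an explicit scale is returned unchanged.
    assert _calculate_scale(16, None) == 1.0 / math.sqrt(16)
    assert _calculate_scale(16, 0.5) == 0.5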
llmeval-env/lib/python3.10/site-packages/torch/nn/attention/bias.py ADDED
@@ -0,0 +1,353 @@
1
+ """Defines bias subclasses that work with scaled_dot_product_attention"""
2
+ from enum import auto, IntEnum
3
+ from typing import Optional
4
+ from warnings import warn
5
+
6
+ import torch
7
+ from torch.backends.cuda import (
8
+ can_use_efficient_attention,
9
+ can_use_flash_attention,
10
+ SDPAParams,
11
+ )
12
+ from torch.nn.attention import _raise_kernel_warnings
13
+ from torch.nn.attention._utils import (
14
+ _calculate_scale,
15
+ _input_requires_grad,
16
+ _postprocess_flash_output,
17
+ _validate_sdpa_input,
18
+ )
19
+ from torch.nn.functional import scaled_dot_product_attention
20
+
21
+ __all__ = ["causal_upper_left", "causal_lower_right", "CausalVariant", "CausalBias"]
22
+
23
+
24
+ torch._dynamo.allow_in_graph(can_use_flash_attention)
25
+ torch._dynamo.allow_in_graph(can_use_efficient_attention)
26
+ torch._dynamo.allow_in_graph(SDPAParams)
27
+
28
+
29
+ class CausalVariant(IntEnum):
30
+ r"""
31
+ Enum for causal variants used in attention mechanisms.
32
+
33
+ Defines two types of causal biases:
34
+
35
+ `UPPER_LEFT`: Represents upper-left triangular bias for standard causal attention.
36
+ The equivalent pytorch code for constructing this bias is:
37
+
38
+ .. code-block:: python
39
+
40
+ torch.tril(torch.ones(size, dtype=torch.bool))
41
+
42
+ For instance, with `shape=(3,4)`, the materialized bias tensor will be:
43
+
44
+ .. code-block:: text
45
+
46
+ [[1, 0, 0, 0],
47
+ [1, 1, 0, 0],
48
+ [1, 1, 1, 0]]
49
+
50
+
51
+ `LOWER_RIGHT`: Represents lower-right triangular bias; the included values are aligned to the lower
52
+ right corner of the matrix.
53
+
54
+ The equivalent pytorch code for constructing this bias is:
55
+
56
+ .. code-block:: python
57
+
58
+ diagonal_offset = size[1] - size[0]
59
+ torch.tril(
60
+ torch.ones(size, dtype=torch.bool),
61
+ diagonal=diagonal_offset,
62
+ )
63
+
64
+ For instance, with `shape=(3,4)`, the materialized bias tensor will be:
65
+
66
+ .. code-block:: text
67
+
68
+ [[1, 1, 0, 0],
69
+ [1, 1, 1, 0],
70
+ [1, 1, 1, 1]]
71
+
72
+ Note that these variants are equivalent to each other when the sequence lengths of the query and key/value
73
+ tensors are equal since the triangular matrix is square.
74
+
75
+ .. warning:: This enum is a prototype and subject to change.
76
+ """
77
+
78
+ UPPER_LEFT = auto()
79
+ LOWER_RIGHT = auto()
80
+
81
+
82
+ class CausalBias(torch.Tensor):
83
+ """
84
+ A bias representing causal attention patterns. For an overview of the bias structure, see the :class:`CausalVariant` enum.
85
+
86
+ This class is used for defining causal (triangular) attention biases. For constructing the bias, there exist
87
+ two factory functions: :func:`causal_upper_left` and :func:`causal_lower_right`.
88
+
89
+ Example:
90
+
91
+ .. code-block:: python
92
+
93
+ from torch.nn.attention.bias import causal_lower_right
94
+
95
+ bsz, num_heads, seqlen_q, seqlen_kv, head_dim = 32, 8, 4, 12, 8
96
+
97
+ # Create a lower-right causal bias
98
+ attn_bias = causal_lower_right(seqlen_q, seqlen_kv)
99
+
100
+ q = torch.randn(bsz, num_heads, seqlen_q, head_dim, device="cuda", dtype=torch.float16)
101
+ k = torch.randn(bsz, num_heads, seqlen_kv, head_dim, device="cuda", dtype=torch.float16)
102
+ v = torch.randn(bsz, num_heads, seqlen_kv, head_dim, device="cuda", dtype=torch.float16)
103
+
104
+ out = F.scaled_dot_product_attention(q, k, v, attn_bias)
105
+
106
+ .. warning:: This class is a prototype and subject to change.
107
+ """
108
+
109
+ def __init__(self, variant: CausalVariant, seq_len_q: int, seq_len_kv: int):
110
+ """
111
+ Initializes the CausalBias instance with a specified variant and sequence lengths.
112
+
113
+ Args:
114
+ variant (CausalVariant): The type of causal bias to use (either UPPER_LEFT or LOWER_RIGHT).
115
+ seq_len_q (int): The sequence length of the query tensor.
116
+ seq_len_kv (int): The sequence length of the key/value tensor.
117
+
118
+ Raises a warning if the LOWER_RIGHT variant is used with seq_len_q > seq_len_kv, as it may produce NaNs.
119
+ """
120
+ assert isinstance(variant, CausalVariant)
121
+ self.variant = variant
122
+ self.seq_len_q = seq_len_q
123
+ self.seq_len_kv = seq_len_kv
124
+ if seq_len_q > seq_len_kv and variant == CausalVariant.LOWER_RIGHT:
125
+ warn(
126
+ "Lower right causal bias will produce NaNs in the output when seq_len_q > seq_len_kv!"
127
+ )
128
+
129
+ def _upper_left(self, device: torch.device) -> torch.Tensor:
130
+ """Upper left causal bias"""
131
+ return torch.tril(
132
+ torch.ones(self.seq_len_q, self.seq_len_kv, device=device, dtype=torch.bool)
133
+ )
134
+
135
+ def _lower_right(self, device: torch.device) -> torch.Tensor:
136
+ """Lower right causal bias"""
137
+ diagonal_offset = self.seq_len_kv - self.seq_len_q
138
+ return torch.tril(
139
+ torch.ones(
140
+ self.seq_len_q, self.seq_len_kv, device=device, dtype=torch.bool
141
+ ),
142
+ diagonal=diagonal_offset,
143
+ )
144
+
145
+ def _materialize(self, device: Optional[torch.device] = None) -> torch.Tensor:
146
+ """
147
+ Materializes the causal bias into a tensor form.
148
+
149
+ Depending on the variant, this method generates either an upper-left or lower-right
150
+ triangular matrix to represent the causal bias.
151
+
152
+ Args:
153
+ device (Optional[torch.device]): The device on which to create the tensor. Defaults to CPU.
154
+
155
+ Returns:
156
+ torch.Tensor: The materialized bias tensor.
157
+ """
158
+ if device is None:
159
+ device = torch.device("cpu")
160
+ if self.variant == CausalVariant.UPPER_LEFT:
161
+ return self._upper_left(device)
162
+ elif self.variant == CausalVariant.LOWER_RIGHT:
163
+ return self._lower_right(device)
164
+
165
+ @staticmethod
166
+ def _dispatch(
167
+ query: torch.Tensor,
168
+ key: torch.Tensor,
169
+ value: torch.Tensor,
170
+ attn_mask: "CausalBias",
171
+ dropout_p: float = 0.0,
172
+ is_causal: bool = False,
173
+ scale: Optional[float] = None,
174
+ ) -> torch.Tensor:
175
+ r"""
176
+ Handles the logic for computing attention with the specified causal bias.
177
+
178
+ Args:
179
+ query (Tensor): Query tensor; shape :math:`(N, ..., L, E)`.
180
+ key (Tensor): Key tensor; shape :math:`(N, ..., S, E)`.
181
+ value (Tensor): Value tensor; shape :math:`(N, ..., S, Ev)`.
182
+ attn_mask (CausalBias): The type of causal attention to apply.
183
+ A boolean mask where a value of True indicates that the element *should* take part in attention.
184
+ A float mask of the same type as query, key, value that is added to the attention score.
185
+ dropout_p (float): Dropout probability; if greater than 0.0, dropout is applied
186
+ is_causal (bool): If true, assumes upper left causal attention masking and errors if both attn_mask and is_causal
187
+ are set.
188
+ scale (optional float): Scaling factor applied prior to softmax. If None, the default value is set
189
+ to :math:`\frac{1}{\sqrt{E}}`.
190
+
191
+ Returns:
192
+ output (Tensor): Attention output; shape :math:`(N, ..., L, Ev)`.
193
+
194
+ Raises:
195
+ ValueError: If the causal bias variant is not a CausalVariant type.
196
+
197
+ """
198
+ if is_causal:
199
+ raise ValueError("CausalBias should not be used with causal=True")
200
+
201
+ if (
202
+ attn_mask.seq_len_q == attn_mask.seq_len_kv
203
+ or attn_mask.variant == CausalVariant.UPPER_LEFT
204
+ ):
205
+ return scaled_dot_product_attention(
206
+ query,
207
+ key,
208
+ value,
209
+ attn_mask=None,
210
+ dropout_p=dropout_p,
211
+ is_causal=True,
212
+ scale=scale,
213
+ )
214
+ elif attn_mask.variant == CausalVariant.LOWER_RIGHT:
215
+ _validate_sdpa_input(query, key, value, None, dropout_p, is_causal, scale)
216
+ sdpa_params = SDPAParams(query, key, value, None, dropout_p, is_causal)
217
+ if can_use_flash_attention(sdpa_params):
218
+ needs_padding = query.size(-1) % 8 != 0
219
+ og_head_size = query.size(-1)
220
+ og_scale = _calculate_scale(og_head_size, scale)
221
+ if needs_padding:
222
+ query = torch.nn.functional.pad(query, (0, 8 - query.size(-1) % 8))
223
+ key = torch.nn.functional.pad(key, (0, 8 - key.size(-1) % 8))
224
+ value = torch.nn.functional.pad(value, (0, 8 - value.size(-1) % 8))
225
+ out = torch.ops.aten._scaled_dot_product_flash_attention(
226
+ query,
227
+ key,
228
+ value,
229
+ dropout_p,
230
+ is_causal=True, # TODO: Flash accepts causal = True and for this particular op it means lower right
231
+ return_debug_mask=False,
232
+ scale=og_scale,
233
+ )[0]
234
+ return _postprocess_flash_output(out, og_head_size)
235
+ if can_use_efficient_attention(sdpa_params):
236
+ compute_log_sumexp = False
237
+ if _input_requires_grad(query, key, value):
238
+ compute_log_sumexp = True
239
+ return torch.ops.aten._efficient_attention_forward(
240
+ query.transpose(1, 2),
241
+ key.transpose(1, 2),
242
+ value.transpose(1, 2),
243
+ bias=None,
244
+ cu_seqlens_q=None,
245
+ cu_seqlens_k=None,
246
+ max_seqlen_q=None,
247
+ max_seqlen_k=None,
248
+ dropout_p=dropout_p,
249
+ custom_mask_type=int(attn_mask.variant),
250
+ compute_log_sumexp=compute_log_sumexp,
251
+ scale=scale,
252
+ causal_diagonal=None,
253
+ seqlen_k=None,
254
+ )[0].transpose(1, 2)
255
+ else:
256
+ _raise_kernel_warnings(sdpa_params)
257
+ # We can't use the fused kernels here; the only remaining support for lower-right bias is via a materialized mask
258
+ return scaled_dot_product_attention(
259
+ query,
260
+ key,
261
+ value,
262
+ attn_mask=attn_mask._materialize(query.device),
263
+ dropout_p=dropout_p,
264
+ is_causal=False,
265
+ scale=scale,
266
+ )
267
+ else:
268
+ raise ValueError(
269
+ f"CausalBias.variant must be a CausalVariant type, but found: {attn_mask.variant}"
270
+ )
271
+
272
+ @classmethod
273
+ def __torch_function__(cls, func, types, args=(), kwargs=None):
274
+ """Defines the behavior of torch.nn.functional.scaled_dot_product_attention when the attn_bias is an AttnBias"""
275
+ if kwargs is None:
276
+ kwargs = {}
277
+ if func != torch.nn.functional.scaled_dot_product_attention:
278
+ raise NotImplementedError(
279
+ "CausalBias only supports scaled_dot_product_attention"
280
+ )
281
+ return cls._dispatch(*args, **kwargs)
282
+
283
+ def __repr__(self):
284
+ return self._materialize().__repr__()
285
+
286
+
287
+ def causal_upper_left(*size) -> CausalBias:
288
+ """
289
+ Creates an upper-left triangular causal bias.
290
+
291
+ This function generates an upper-left triangular matrix to represent causal attention bias with a
292
+ diagonal offset set so that the included values are aligned to the upper left corner of the matrix.
293
+ This is equivalent to the `is_causal=True` argument in `scaled_dot_product_attention`.
294
+
295
+ The equivalent pytorch code for constructing this bias is:
296
+
297
+ .. code-block:: python
298
+
299
+ torch.tril(torch.ones(size, dtype=torch.bool))
300
+
301
+ For instance, with `shape=(3,4)`, the materialized bias tensor will be:
302
+
303
+ .. code-block:: text
304
+
305
+ [[1, 0, 0, 0],
306
+ [1, 1, 0, 0],
307
+ [1, 1, 1, 0]]
308
+
309
+ Args:
310
+ size: The size of the bias matrix.
311
+
312
+ Returns:
313
+ CausalBias: The UPPER_LEFT triangular causal bias variant.
314
+ """
315
+ assert len(size) == 2, "causal_upper_left only supports 2D tensors"
316
+ seq_len_q, seq_len_kv = size
317
+ return CausalBias(CausalVariant.UPPER_LEFT, seq_len_q, seq_len_kv)
318
+
319
+
320
+ def causal_lower_right(*size) -> CausalBias:
321
+ """
322
+ Creates a lower-right triangular causal bias.
323
+
324
+ This function generates a lower-right triangular matrix to represent causal attention bias with a
325
+ diagonal offset set so that the included values are aligned to the lower right corner of the matrix.
326
+
327
+ The equivalent pytorch code for constructing this bias is:
328
+
329
+ .. code-block:: python
330
+
331
+ diagonal_offset = size[1] - size[0]
332
+ torch.tril(
333
+ torch.ones(size, dtype=torch.bool),
334
+ diagonal=diagonal_offset,
335
+ )
336
+
337
+ For instance, with `shape=(3,4)`, the materialized bias tensor will be:
338
+
339
+ .. code-block:: text
340
+
341
+ [[1, 1, 0, 0],
342
+ [1, 1, 1, 0],
343
+ [1, 1, 1, 1]]
344
+
345
+ Args:
346
+ size: The size of the bias matrix.
347
+
348
+ Returns:
349
+ CausalBias: The LOWER_RIGHT triangular causal bias variant.
350
+ """
351
+ assert len(size) == 2, "causal_lower_right only supports 2D tensors"
352
+ seq_len_q, seq_len_kv = size
353
+ return CausalBias(CausalVariant.LOWER_RIGHT, seq_len_q, seq_len_kv)
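The docstring example above assumes CUDA and half precision. A CPU-only sketch (not part of this commit, assuming a PyTorch build that ships `torch.nn.attention.bias`) of the same flow, where the unfused fallback materializes the boolean mask:

    import torch
    import torch.nn.functional as F
    from torch.nn.attention.bias import causal_lower_right, causal_upper_left

    bsz, heads, seqlen_q, seqlen_kv, head_dim = 2, 4, 3, 5, 8
    q = torch.randn(bsz, heads, seqlen_q, head_dim)
    k = torch.randn(bsz, heads, seqlen_kv, head_dim)
    v = torch.randn(bsz, heads, seqlen_kv, head_dim)

    # Passing the bias where attn_mask normally goes routes the call through
    # CausalBias.__torch_function__ and then CausalBias._dispatch.
    attn_bias = causal_lower_right(seqlen_q, seqlen_kv)
    out = F.scaled_dot_product_attention(q, k, v, attn_bias)
    print(out.shape)                                   # torch.Size([2, 4, 3, 8])

    # _materialize() shows the boolean mask the fallback path uses.
    print(causal_upper_left(seqlen_q, seqlen_kv)._materialize())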
llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/_functions.py ADDED
@@ -0,0 +1,126 @@
1
+ import warnings
2
+
3
+ import torch
4
+ from . import comm
5
+ from torch.autograd import Function
6
+ from torch._utils import _get_device_index
7
+ from typing import List, Optional
8
+
9
+
10
+ class Broadcast(Function):
11
+
12
+ @staticmethod
13
+ def forward(ctx, target_gpus, *inputs):
14
+ assert all(i.device.type != 'cpu' for i in inputs), (
15
+ 'Broadcast function not implemented for CPU tensors'
16
+ )
17
+ target_gpus = [_get_device_index(x, True) for x in target_gpus]
18
+ ctx.target_gpus = target_gpus
19
+ if len(inputs) == 0:
20
+ return tuple()
21
+ ctx.num_inputs = len(inputs)
22
+ ctx.input_device = inputs[0].get_device()
23
+ outputs = comm.broadcast_coalesced(inputs, ctx.target_gpus)
24
+ non_differentiables = []
25
+ for idx, input_requires_grad in enumerate(ctx.needs_input_grad[1:]):
26
+ if not input_requires_grad:
27
+ for output in outputs:
28
+ non_differentiables.append(output[idx])
29
+ ctx.mark_non_differentiable(*non_differentiables)
30
+ return tuple([t for tensors in outputs for t in tensors])
31
+
32
+ @staticmethod
33
+ def backward(ctx, *grad_outputs):
34
+ return (None,) + ReduceAddCoalesced.apply(ctx.input_device, ctx.num_inputs, *grad_outputs)
35
+
36
+
37
+ class ReduceAddCoalesced(Function):
38
+
39
+ @staticmethod
40
+ def forward(ctx, destination, num_inputs, *grads):
41
+ ctx.target_gpus = [grads[i].get_device() for i in range(0, len(grads), num_inputs)]
42
+
43
+ grads_ = [grads[i:i + num_inputs]
44
+ for i in range(0, len(grads), num_inputs)]
45
+ return comm.reduce_add_coalesced(grads_, destination)
46
+
47
+ @staticmethod
48
+ def backward(ctx, *grad_outputs):
49
+ return (None, None,) + Broadcast.apply(ctx.target_gpus, *grad_outputs)
50
+
51
+
52
+ class Gather(Function):
53
+
54
+ @staticmethod
55
+ def forward(ctx, target_device, dim, *inputs):
56
+ assert all(i.device.type != 'cpu' for i in inputs), (
57
+ 'Gather function not implemented for CPU tensors'
58
+ )
59
+ if (target_device == 'cpu'):
60
+ ctx.target_device = 'cpu'
61
+ else:
62
+ target_device = _get_device_index(target_device, True)
63
+ ctx.target_device = target_device
64
+ ctx.dim = dim
65
+ ctx.input_gpus = tuple(i.get_device() for i in inputs)
66
+ if all(t.dim() == 0 for t in inputs) and dim == 0:
67
+ inputs = tuple(t.view(1) for t in inputs)
68
+ warnings.warn('Was asked to gather along dimension 0, but all '
69
+ 'input tensors were scalars; will instead unsqueeze '
70
+ 'and return a vector.')
71
+ ctx.unsqueezed_scalar = True
72
+ else:
73
+ ctx.unsqueezed_scalar = False
74
+ ctx.input_sizes = tuple(i.size(ctx.dim) for i in inputs)
75
+ return comm.gather(inputs, ctx.dim, ctx.target_device)
76
+
77
+ @staticmethod
78
+ def backward(ctx, grad_output):
79
+ scattered_grads = Scatter.apply(ctx.input_gpus, ctx.input_sizes, ctx.dim, grad_output)
80
+ if ctx.unsqueezed_scalar:
81
+ scattered_grads = tuple(g[0] for g in scattered_grads)
82
+ return (None, None) + scattered_grads
83
+
84
+
85
+ class Scatter(Function):
86
+
87
+ @staticmethod
88
+ def forward(ctx, target_gpus, chunk_sizes, dim, input):
89
+ target_gpus = [_get_device_index(x, True) for x in target_gpus]
90
+ ctx.dim = dim
91
+ ctx.input_device = input.get_device() if input.device.type != "cpu" else -1
92
+ streams = None
93
+ if torch.cuda.is_available() and ctx.input_device == -1:
94
+ # Perform CPU to GPU copies in a background stream
95
+ streams = [_get_stream(torch.device("cuda", device)) for device in target_gpus]
96
+ outputs = comm.scatter(input, target_gpus, chunk_sizes, ctx.dim, streams)
97
+ # Synchronize with the copy stream
98
+ if streams is not None:
99
+ for i, output in enumerate(outputs):
100
+ with torch.cuda.device(target_gpus[i]):
101
+ main_stream = torch.cuda.current_stream()
102
+ main_stream.wait_stream(streams[i])
103
+ output.record_stream(main_stream)
104
+ return outputs
105
+
106
+ @staticmethod
107
+ def backward(ctx, *grad_output):
108
+ return None, None, None, Gather.apply(ctx.input_device, ctx.dim, *grad_output)
109
+
110
+
111
+ # background streams used for copying
112
+ _streams: Optional[List[Optional[torch.Stream]]] = None
113
+
114
+ def _get_stream(device: torch.device):
115
+ """Get a background stream for copying between CPU and target device."""
116
+ global _streams
117
+ if device.type == "cpu":
118
+ return None
119
+ device_mod = getattr(torch, device.type, None)
120
+ if device_mod is None:
121
+ return None
122
+ if _streams is None:
123
+ _streams = [None] * device_mod.device_count()
124
+ if _streams[device.index] is None:
125
+ _streams[device.index] = device_mod.Stream(device.index)
126
+ return _streams[device.index]
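These autograd `Function`s are what `DataParallel` drives under the hood, and they require GPU tensors. A hedged sketch (not part of this commit; needs at least two CUDA devices):

    import torch
    from torch.nn.parallel._functions import Broadcast, Gather

    if torch.cuda.is_available() and torch.cuda.device_count() >= 2:
        x = torch.randn(4, 3, device="cuda:0", requires_grad=True)

        # One copy of x per target GPU, returned as a flat tuple.
        x0, x1 = Broadcast.apply([0, 1], x)

        # Concatenate the per-device tensors back on device 0 along dim 0.
        y = Gather.apply(0, 0, x0, x1)
        y.sum().backward()
        print(x.grad.shape)   # torch.Size([4, 3]); gradients from both replicas are summed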
llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/comm.py ADDED
@@ -0,0 +1,236 @@
1
+ import warnings
2
+ import torch
3
+ from torch.cuda import nccl
4
+ from torch._utils import _take_tensors, _flatten_dense_tensors, \
5
+ _unflatten_dense_tensors, _reorder_tensors_as, _get_device_index, _handle_complex
6
+ from typing import List
7
+
8
+ def broadcast(tensor, devices=None, *, out=None):
9
+ r"""Broadcasts a tensor to specified GPU devices.
10
+
11
+ Args:
12
+ tensor (Tensor): tensor to broadcast. Can be on CPU or GPU.
13
+ devices (Iterable[torch.device, str or int], optional): an iterable of
14
+ GPU devices, among which to broadcast.
15
+ out (Sequence[Tensor], optional, keyword-only): the GPU tensors to
16
+ store output results.
17
+
18
+ .. note::
19
+ Exactly one of :attr:`devices` and :attr:`out` must be specified.
20
+
21
+ Returns:
22
+ - If :attr:`devices` is specified,
23
+ a tuple containing copies of :attr:`tensor`, placed on
24
+ :attr:`devices`.
25
+ - If :attr:`out` is specified,
26
+ a tuple containing :attr:`out` tensors, each containing a copy of
27
+ :attr:`tensor`.
28
+ """
29
+ tensor = _handle_complex(tensor)
30
+ if not ((devices is None) ^ (out is None)):
31
+ raise RuntimeError(
32
+ f"Exactly one of 'devices' and 'out' must be specified, but got devices={devices} and out={out}")
33
+ if devices is not None:
34
+ devices = [_get_device_index(d) for d in devices]
35
+ return torch._C._broadcast(tensor, devices)
36
+ else:
37
+ return torch._C._broadcast_out(tensor, out)
38
+
39
+
40
+ def broadcast_coalesced(tensors, devices, buffer_size=10485760):
41
+ """Broadcast a sequence of tensors to the specified GPUs.
42
+
43
+ Small tensors are first coalesced into a buffer to reduce the number of synchronizations.
44
+
45
+ Args:
46
+ tensors (sequence): tensors to broadcast. Must be on the same device,
47
+ either CPU or GPU.
48
+ devices (Iterable[torch.device, str or int]): an iterable of GPU
49
+ devices, among which to broadcast.
50
+ buffer_size (int): maximum size of the buffer used for coalescing
51
+
52
+ Returns:
53
+ A tuple containing copies of :attr:`tensor`, placed on :attr:`devices`.
54
+ """
55
+ devices = [_get_device_index(d) for d in devices]
56
+ tensors = [_handle_complex(t) for t in tensors]
57
+ return torch._C._broadcast_coalesced(tensors, devices, buffer_size)
58
+
59
+
60
+ def reduce_add(inputs, destination=None):
61
+ """Sum tensors from multiple GPUs.
62
+
63
+ All inputs should have matching shapes, dtype, and layout. The output tensor
64
+ will be of the same shape, dtype, and layout.
65
+
66
+ Args:
67
+ inputs (Iterable[Tensor]): an iterable of tensors to add.
68
+ destination (int, optional): a device on which the output will be
69
+ placed (default: current device).
70
+
71
+ Returns:
72
+ A tensor containing an elementwise sum of all inputs, placed on the
73
+ :attr:`destination` device.
74
+ """
75
+ destination = _get_device_index(destination, optional=True)
76
+ input_size = inputs[0].size()
77
+ root_index = None # index of input tensor that already is on the correct device
78
+ for i, inp in enumerate(inputs):
79
+ assert inp.device.type != "cpu", "reduce_add expects all inputs to be on GPUs"
80
+ if inp.get_device() == destination:
81
+ root_index = i
82
+ if inp.size() != input_size:
83
+ got = 'x'.join(str(x) for x in inp.size())
84
+ expected = 'x'.join(str(x) for x in input_size)
85
+ raise ValueError(f"input {i} has invalid size: got {got}, but expected {expected}")
86
+ if root_index is None:
87
+ raise RuntimeError("reduce_add expects destination to be on the same GPU with one of the tensors")
88
+
89
+ if len(inputs) == 1:
90
+ return inputs[0]
91
+
92
+ if nccl.is_available(inputs):
93
+ result = torch.empty_like(inputs[root_index])
94
+ nccl.reduce(inputs, output=result, root=root_index)
95
+ else:
96
+ destination_device = torch.device(inputs[root_index].device.type, destination)
97
+ nonroot = [t for i, t in enumerate(inputs) if i != root_index]
98
+ # make a new tensor w/o clone
99
+ result = inputs[root_index] + nonroot[0].to(device=destination_device, non_blocking=True)
100
+ for other in nonroot[1:]:
101
+ result.add_(other.to(device=destination_device, non_blocking=True))
102
+ return result
103
+
104
+
105
+ def reduce_add_coalesced(inputs, destination=None, buffer_size=10485760):
106
+ """Sum tensors from multiple GPUs.
107
+
108
+ Small tensors are first coalesced into a buffer to reduce the number
109
+ of synchronizations.
110
+
111
+ Args:
112
+ inputs (Iterable[Iterable[Tensor]]): iterable of iterables that
113
+ contain tensors from a single device.
114
+ destination (int, optional): a device on which the output will be
115
+ placed (default: current device).
116
+ buffer_size (int): maximum size of the buffer used for coalescing
117
+
118
+ Returns:
119
+ A tuple of tensors containing an elementwise sum of each group of
120
+ inputs, placed on the ``destination`` device.
121
+ """
122
+ # TODO: When `len(inputs) == 1` and all inputs are on `destination`, just
123
+ # return `inputs`.
124
+ dense_tensors: List[List] = [[] for _ in inputs] # shape (num_gpus, num_tensors)
125
+ output = []
126
+ ref_order = []
127
+ # process sparse ones first since they may have different sizes on different gpus
128
+ for tensor_at_gpus in zip(*inputs):
129
+ if all(t.is_sparse for t in tensor_at_gpus):
130
+ result = reduce_add(tensor_at_gpus, destination) # this will be sparse too
131
+ output.append(result)
132
+ ref_order.append(tensor_at_gpus[0])
133
+ else:
134
+ for coll, t in zip(dense_tensors, tensor_at_gpus):
135
+ coll.append(t.to_dense() if t.is_sparse else t)
136
+ ref_order.append(dense_tensors[0][-1])
137
+ itrs = [_take_tensors(tensors, buffer_size) for tensors in dense_tensors]
138
+ # now the dense ones, which have consistent sizes
139
+ for chunks in zip(*itrs):
140
+ flat_tensors = [_flatten_dense_tensors(chunk) for chunk in chunks] # (num_gpus,)
141
+ flat_result = reduce_add(flat_tensors, destination)
142
+ for t in _unflatten_dense_tensors(flat_result, chunks[0]):
143
+ # The unflattened tensors do not share storage, and we don't expose
144
+ # base flat tensor anyways, so give them different version counters.
145
+ # See NOTE [ Version Counter in comm.*_coalesced ]
146
+ output.append(t.data)
147
+ return tuple(_reorder_tensors_as(output, ref_order))
148
+
149
+
150
+ def scatter(tensor, devices=None, chunk_sizes=None, dim=0, streams=None, *, out=None):
151
+ """Scatters tensor across multiple GPUs.
152
+
153
+ Args:
154
+ tensor (Tensor): tensor to scatter. Can be on CPU or GPU.
155
+ devices (Iterable[torch.device, str or int], optional): an iterable of
156
+ GPU devices, among which to scatter.
157
+ chunk_sizes (Iterable[int], optional): sizes of chunks to be placed on
158
+ each device. It should match :attr:`devices` in length and sum to
159
+ ``tensor.size(dim)``. If not specified, :attr:`tensor` will be divided
160
+ into equal chunks.
161
+ dim (int, optional): A dimension along which to chunk :attr:`tensor`.
162
+ Default: ``0``.
163
+ streams (Iterable[torch.cuda.Stream], optional): an iterable of Streams, among
164
+ which to execute the scatter. If not specified, the default stream will
165
+ be utilized.
166
+ out (Sequence[Tensor], optional, keyword-only): the GPU tensors to
167
+ store output results. Sizes of these tensors must match that of
168
+ :attr:`tensor`, except for :attr:`dim`, where the total size must
169
+ sum to ``tensor.size(dim)``.
170
+
171
+ .. note::
172
+ Exactly one of :attr:`devices` and :attr:`out` must be specified. When
173
+ :attr:`out` is specified, :attr:`chunk_sizes` must not be specified and
174
+ will be inferred from sizes of :attr:`out`.
175
+
176
+ Returns:
177
+ - If :attr:`devices` is specified,
178
+ a tuple containing chunks of :attr:`tensor`, placed on
179
+ :attr:`devices`.
180
+ - If :attr:`out` is specified,
181
+ a tuple containing :attr:`out` tensors, each containing a chunk of
182
+ :attr:`tensor`.
183
+ """
184
+ tensor = _handle_complex(tensor)
185
+ if out is None:
186
+ devices = [_get_device_index(d) for d in devices]
187
+ return tuple(torch._C._scatter(tensor, devices, chunk_sizes, dim, streams))
188
+ else:
189
+ if devices is not None:
190
+ raise RuntimeError(
191
+ f"'devices' must not be specified when 'out' is specified, but got devices={devices}")
192
+ if chunk_sizes is not None:
193
+ raise RuntimeError(
194
+ f"'chunk_sizes' must not be specified when 'out' is specified, but got chunk_sizes={chunk_sizes}")
195
+ return tuple(torch._C._scatter_out(tensor, out, dim, streams))
196
+
197
+
198
+ def gather(tensors, dim=0, destination=None, *, out=None):
199
+ r"""Gathers tensors from multiple GPU devices.
200
+
201
+ Args:
202
+ tensors (Iterable[Tensor]): an iterable of tensors to gather.
203
+ Tensor sizes in all dimensions other than :attr:`dim` have to match.
204
+ dim (int, optional): a dimension along which the tensors will be
205
+ concatenated. Default: ``0``.
206
+ destination (torch.device, str, or int, optional): the output device.
207
+ Can be CPU or CUDA. Default: the current CUDA device.
208
+ out (Tensor, optional, keyword-only): the tensor to store gather result.
209
+ Its sizes must match those of :attr:`tensors`, except for :attr:`dim`,
210
+ where the size must equal ``sum(tensor.size(dim) for tensor in tensors)``.
211
+ Can be on CPU or CUDA.
212
+
213
+ .. note::
214
+ :attr:`destination` must not be specified when :attr:`out` is specified.
215
+
216
+ Returns:
217
+ - If :attr:`destination` is specified,
218
+ a tensor located on :attr:`destination` device, that is a result of
219
+ concatenating :attr:`tensors` along :attr:`dim`.
220
+ - If :attr:`out` is specified,
221
+ the :attr:`out` tensor, now containing results of concatenating
222
+ :attr:`tensors` along :attr:`dim`.
223
+ """
224
+ tensors = [_handle_complex(t) for t in tensors]
225
+ if out is None:
226
+ if destination == -1:
227
+ warnings.warn(
228
+ 'Using -1 to represent CPU tensor is deprecated. Please use a '
229
+ 'device object or string instead, e.g., "cpu".')
230
+ destination = _get_device_index(destination, allow_cpu=True, optional=True)
231
+ return torch._C._gather(tensors, dim, destination)
232
+ else:
233
+ if destination is not None:
234
+ raise RuntimeError(
235
+ f"'destination' must not be specified when 'out' is specified, but got destination={destination}")
236
+ return torch._C._gather_out(tensors, out, dim)
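A short sketch of the round trip these primitives provide (not part of this commit; requires at least two CUDA devices):

    import torch
    from torch.nn.parallel import comm

    if torch.cuda.is_available() and torch.cuda.device_count() >= 2:
        t = torch.arange(8.0, device="cuda:0").reshape(4, 2)

        copies = comm.broadcast(t, devices=[0, 1])            # one full copy per device
        chunks = comm.scatter(t, devices=[0, 1], dim=0)       # two (2, 2) chunks
        restored = comm.gather(chunks, dim=0, destination=0)  # concatenated back on device 0
        assert torch.equal(restored, t)

        total = comm.reduce_add(copies, destination=0)        # elementwise sum on device 0
        assert torch.equal(total, 2 * t)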
llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/data_parallel.py ADDED
@@ -0,0 +1,269 @@
1
+ import operator
2
+ import torch
3
+ import warnings
4
+ from itertools import chain
5
+ from typing import Any, Dict, Generic, List, Optional, Sequence, Tuple, TypeVar, Union
6
+ from ..modules import Module
7
+ from .scatter_gather import scatter_kwargs, gather
8
+ from .replicate import replicate
9
+ from .parallel_apply import parallel_apply
10
+ from torch._utils import (
11
+ _get_all_device_indices,
12
+ _get_available_device_type,
13
+ _get_device_index,
14
+ _get_devices_properties
15
+ )
16
+
17
+ __all__ = ['DataParallel', 'data_parallel']
18
+
19
+ def _check_balance(device_ids: Sequence[Union[int, torch.device]]) -> None:
20
+ imbalance_warn = """
21
+ There is an imbalance between your GPUs. You may want to exclude GPU {} which
22
+ has less than 75% of the memory or cores of GPU {}. You can do so by setting
23
+ the device_ids argument to DataParallel, or by setting the CUDA_VISIBLE_DEVICES
24
+ environment variable."""
25
+ device_ids = [_get_device_index(x, True) for x in device_ids]
26
+ dev_props = _get_devices_properties(device_ids)
27
+
28
+ def warn_imbalance(get_prop):
29
+ values = [get_prop(props) for props in dev_props]
30
+ min_pos, min_val = min(enumerate(values), key=operator.itemgetter(1))
31
+ max_pos, max_val = max(enumerate(values), key=operator.itemgetter(1))
32
+ if min_val / max_val < 0.75:
33
+ warnings.warn(imbalance_warn.format(device_ids[min_pos], device_ids[max_pos]))
34
+ return True
35
+ return False
36
+
37
+ if warn_imbalance(lambda props: props.total_memory):
38
+ return
39
+ if warn_imbalance(lambda props: props.multi_processor_count):
40
+ return
41
+
42
+
43
+ T = TypeVar("T", bound=Module)
44
+
45
+
46
+ class DataParallel(Module, Generic[T]):
47
+ r"""Implements data parallelism at the module level.
48
+
49
+ This container parallelizes the application of the given :attr:`module` by
50
+ splitting the input across the specified devices by chunking in the batch
51
+ dimension (other objects will be copied once per device). In the forward
52
+ pass, the module is replicated on each device, and each replica handles a
53
+ portion of the input. During the backwards pass, gradients from each replica
54
+ are summed into the original module.
55
+
56
+ The batch size should be larger than the number of GPUs used.
57
+
58
+ .. warning::
59
+ It is recommended to use :class:`~torch.nn.parallel.DistributedDataParallel`,
60
+ instead of this class, to do multi-GPU training, even if there is only a single
61
+ node. See: :ref:`cuda-nn-ddp-instead` and :ref:`ddp`.
62
+
63
+ Arbitrary positional and keyword inputs are allowed to be passed into
64
+ DataParallel but some types are specially handled. Tensors will be
65
+ **scattered** on the dim specified (default 0). Tuple, list, and dict types will
66
+ be shallow copied. The other types will be shared among different threads
67
+ and can be corrupted if written to in the model's forward pass.
68
+
69
+ The parallelized :attr:`module` must have its parameters and buffers on
70
+ ``device_ids[0]`` before running this :class:`~torch.nn.DataParallel`
71
+ module.
72
+
73
+ .. warning::
74
+ In each forward, :attr:`module` is **replicated** on each device, so any
75
+ updates to the running module in ``forward`` will be lost. For example,
76
+ if :attr:`module` has a counter attribute that is incremented in each
77
+ ``forward``, it will always stay at the initial value because the update
78
+ is done on the replicas which are destroyed after ``forward``. However,
79
+ :class:`~torch.nn.DataParallel` guarantees that the replica on
80
+ ``device[0]`` will have its parameters and buffers sharing storage with
81
+ the base parallelized :attr:`module`. So **in-place** updates to the
82
+ parameters or buffers on ``device[0]`` will be recorded. E.g.,
83
+ :class:`~torch.nn.BatchNorm2d` and :func:`~torch.nn.utils.spectral_norm`
84
+ rely on this behavior to update the buffers.
85
+
86
+ .. warning::
87
+ Forward and backward hooks defined on :attr:`module` and its submodules
88
+ will be invoked ``len(device_ids)`` times, each with inputs located on
89
+ a particular device. Particularly, the hooks are only guaranteed to be
90
+ executed in correct order with respect to operations on corresponding
91
+ devices. For example, it is not guaranteed that hooks set via
92
+ :meth:`~torch.nn.Module.register_forward_pre_hook` be executed before
93
+ `all` ``len(device_ids)`` :meth:`~torch.nn.Module.forward` calls, but
94
+ that each such hook be executed before the corresponding
95
+ :meth:`~torch.nn.Module.forward` call of that device.
96
+
97
+ .. warning::
98
+ When :attr:`module` returns a scalar (i.e., 0-dimensional tensor) in
99
+ :func:`forward`, this wrapper will return a vector of length equal to
100
+ number of devices used in data parallelism, containing the result from
101
+ each device.
102
+
103
+ .. note::
104
+ There is a subtlety in using the
105
+ ``pack sequence -> recurrent network -> unpack sequence`` pattern in a
106
+ :class:`~torch.nn.Module` wrapped in :class:`~torch.nn.DataParallel`.
107
+ See :ref:`pack-rnn-unpack-with-data-parallelism` section in FAQ for
108
+ details.
109
+
110
+
111
+ Args:
112
+ module (Module): module to be parallelized
113
+ device_ids (list of int or torch.device): CUDA devices (default: all devices)
114
+ output_device (int or torch.device): device location of output (default: device_ids[0])
115
+
116
+ Attributes:
117
+ module (Module): the module to be parallelized
118
+
119
+ Example::
120
+
121
+ >>> # xdoctest: +SKIP
122
+ >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
123
+ >>> output = net(input_var) # input_var can be on any device, including CPU
124
+ """
125
+
126
+ # TODO: update notes/cuda.rst when this class handles 8+ GPUs well
127
+
128
+ def __init__(
129
+ self,
130
+ module: T,
131
+ device_ids: Optional[Sequence[Union[int, torch.device]]] = None,
132
+ output_device: Optional[Union[int, torch.device]] = None,
133
+ dim: int = 0,
134
+ ) -> None:
135
+ super().__init__()
136
+ torch._C._log_api_usage_once("torch.nn.parallel.DataParallel")
137
+ device_type = _get_available_device_type()
138
+ if device_type is None:
139
+ self.module = module
140
+ self.device_ids = []
141
+ return
142
+
143
+ if device_ids is None:
144
+ device_ids = _get_all_device_indices()
145
+
146
+ if device_ids is None:
147
+ raise RuntimeError("no available devices were found")
148
+
149
+ if output_device is None:
150
+ output_device = device_ids[0]
151
+
152
+ self.dim = dim
153
+ self.module = module
154
+ self.device_ids = [_get_device_index(x, True) for x in device_ids]
155
+ self.output_device = _get_device_index(output_device, True)
156
+ self.src_device_obj = torch.device(device_type, self.device_ids[0])
157
+
158
+ if device_type == "cuda":
159
+ _check_balance(self.device_ids)
160
+
161
+ if len(self.device_ids) == 1:
162
+ self.module.to(self.src_device_obj)
163
+
164
+ def forward(self, *inputs: Any, **kwargs: Any) -> Any:
165
+ with torch.autograd.profiler.record_function("DataParallel.forward"):
166
+ if not self.device_ids:
167
+ return self.module(*inputs, **kwargs)
168
+
169
+ for t in chain(self.module.parameters(), self.module.buffers()):
170
+ if t.device != self.src_device_obj:
171
+ raise RuntimeError("module must have its parameters and buffers "
172
+ f"on device {self.src_device_obj} (device_ids[0]) but found one of "
173
+ f"them on device: {t.device}")
174
+
175
+ inputs, module_kwargs = self.scatter(inputs, kwargs, self.device_ids)
176
+ # for forward function without any inputs, empty list and dict will be created
177
+ # so the module can be executed on one device which is the first one in device_ids
178
+ if not inputs and not module_kwargs:
179
+ inputs = ((),)
180
+ module_kwargs = ({},)
181
+
182
+ if len(self.device_ids) == 1:
183
+ return self.module(*inputs[0], **module_kwargs[0])
184
+ replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
185
+ outputs = self.parallel_apply(replicas, inputs, module_kwargs)
186
+ return self.gather(outputs, self.output_device)
187
+
188
+ def replicate(self, module: T, device_ids: Sequence[Union[int, torch.device]]) -> List[T]:
189
+ return replicate(module, device_ids, not torch.is_grad_enabled())
190
+
191
+ def scatter(
192
+ self,
193
+ inputs: Tuple[Any, ...],
194
+ kwargs: Optional[Dict[str, Any]],
195
+ device_ids: Sequence[Union[int, torch.device]],
196
+ ) -> Any:
197
+ return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
198
+
199
+ def parallel_apply(self, replicas: Sequence[T], inputs: Sequence[Any], kwargs: Any) -> List[Any]:
200
+ return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
201
+
202
+ def gather(self, outputs: Any, output_device: Union[int, torch.device]) -> Any:
203
+ return gather(outputs, output_device, dim=self.dim)
204
+
205
+
206
+ def data_parallel(
207
+ module: Module,
208
+ inputs: Any,
209
+ device_ids: Optional[Sequence[Union[int, torch.device]]] = None,
210
+ output_device: Optional[Union[int, torch.device]] = None,
211
+ dim: int = 0,
212
+ module_kwargs: Optional[Any] = None,
213
+ ) -> torch.Tensor:
214
+ r"""Evaluate module(input) in parallel across the GPUs given in device_ids.
215
+
216
+ This is the functional version of the DataParallel module.
217
+
218
+ Args:
219
+ module (Module): the module to evaluate in parallel
220
+ inputs (Tensor): inputs to the module
221
+ device_ids (list of int or torch.device): GPU ids on which to replicate module
222
+ output_device (list of int or torch.device): GPU location of the output. Use -1 to indicate the CPU.
223
+ (default: device_ids[0])
224
+ Returns:
225
+ a Tensor containing the result of module(input) located on
226
+ output_device
227
+ """
228
+ if not isinstance(inputs, tuple):
229
+ inputs = (inputs,) if inputs is not None else ()
230
+
231
+ device_type = _get_available_device_type()
232
+
233
+ if device_type is None:
234
+ raise RuntimeError("device type could not be determined")
235
+
236
+ if device_ids is None:
237
+ device_ids = _get_all_device_indices()
238
+
239
+ if device_ids is None:
240
+ raise RuntimeError("no available devices were found")
241
+
242
+ if output_device is None:
243
+ output_device = device_ids[0]
244
+
245
+ device_ids = [_get_device_index(x, True) for x in device_ids]
246
+ output_device = _get_device_index(output_device, True)
247
+ src_device_obj = torch.device(device_type, device_ids[0])
248
+
249
+ for t in chain(module.parameters(), module.buffers()):
250
+ if t.device != src_device_obj:
251
+ raise RuntimeError("module must have its parameters and buffers "
252
+ f"on device {src_device_obj} (device_ids[0]) but found one of "
253
+ f"them on device: {t.device}")
254
+
255
+ inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
256
+ # for module without any inputs, empty list and dict will be created
257
+ # so the module can be executed on one device which is the first one in device_ids
258
+ if not inputs and not module_kwargs:
259
+ inputs = ((),)
260
+ module_kwargs = ({},)
261
+
262
+ assert module_kwargs is not None
263
+
264
+ if len(device_ids) == 1:
265
+ return module(*inputs[0], **module_kwargs[0])
266
+ used_device_ids = device_ids[:len(inputs)]
267
+ replicas = replicate(module, used_device_ids)
268
+ outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
269
+ return gather(outputs, output_device, dim)
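For completeness, a minimal usage sketch (not part of this commit) of both the module wrapper and the functional form; it also runs with a single GPU, in which case `DataParallel` simply forwards to the wrapped module on `device_ids[0]`:

    import torch
    import torch.nn as nn
    from torch.nn.parallel import data_parallel

    if torch.cuda.is_available():
        model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4)).cuda()
        dp_model = nn.DataParallel(model)     # device_ids defaults to all visible GPUs
        x = torch.randn(8, 16)                # inputs may live on any device, including CPU
        out = dp_model(x)
        print(out.shape)                      # torch.Size([8, 4])

        # Functional form defined above.
        out2 = data_parallel(model, x.cuda())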
llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/distributed.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/parallel_apply.py ADDED
@@ -0,0 +1,110 @@
1
+ import threading
2
+ import torch
3
+ from typing import Any, Dict, List, Optional, Sequence, Tuple, Union, cast
4
+ from ..modules import Module
5
+ from torch.cuda._utils import _get_device_index
6
+ from torch.cuda.amp import autocast
7
+ from torch._utils import ExceptionWrapper
8
+
9
+ __all__ = ['get_a_var', 'parallel_apply']
10
+
11
+ def get_a_var(obj: Union[torch.Tensor, List[Any], Tuple[Any, ...], Dict[Any, Any]]) -> Optional[torch.Tensor]:
12
+ if isinstance(obj, torch.Tensor):
13
+ return obj
14
+
15
+ if isinstance(obj, (list, tuple)):
16
+ for result in map(get_a_var, obj):
17
+ if isinstance(result, torch.Tensor):
18
+ return result
19
+ if isinstance(obj, dict):
20
+ for result in map(get_a_var, obj.items()):
21
+ if isinstance(result, torch.Tensor):
22
+ return result
23
+ return None
24
+
25
+ def parallel_apply(
26
+ modules: Sequence[Module],
27
+ inputs: Sequence[Any],
28
+ kwargs_tup: Optional[Sequence[Dict[str, Any]]] = None,
29
+ devices: Optional[Sequence[Optional[Union[int, torch.device]]]] = None,
30
+ ) -> List[Any]:
31
+ r"""Apply each `module` in :attr:`modules` in parallel on each of :attr:`devices`.
32
+
33
+ Args:
34
+ modules (Module): modules to be parallelized
35
+ inputs (tensor): inputs to the modules
36
+ devices (list of int or torch.device): CUDA devices
37
+
38
+ :attr:`modules`, :attr:`inputs`, :attr:`kwargs_tup` (if given), and
39
+ :attr:`devices` (if given) should all have the same length. Moreover, each
40
+ element of :attr:`inputs` can either be a single object as the only argument
41
+ to a module, or a collection of positional arguments.
42
+ """
43
+ assert len(modules) == len(inputs), f'The number of modules {len(modules)} is not equal to the number of inputs {len(inputs)}'
44
+ if kwargs_tup is not None:
45
+ assert len(modules) == len(kwargs_tup)
46
+ else:
47
+ kwargs_tup = (cast(Dict[str, Any], {}),) * len(modules)
48
+ if devices is not None:
49
+ assert len(modules) == len(devices)
50
+ else:
51
+ devices = [None] * len(modules)
52
+ devices = [_get_device_index(x, True) for x in devices]
53
+ streams = [torch.cuda.current_stream(x) for x in devices]
54
+ lock = threading.Lock()
55
+ results = {}
56
+ grad_enabled, autocast_enabled = torch.is_grad_enabled(), torch.is_autocast_enabled()
57
+
58
+ def _worker(
59
+ i: int,
60
+ module: Module,
61
+ input: Any,
62
+ kwargs: Dict[str, Any],
63
+ device: Optional[Union[int, torch.device]] = None,
64
+ stream: Optional[torch.cuda.Stream] = None,
65
+ ) -> None:
66
+ torch.set_grad_enabled(grad_enabled)
67
+ if device is None:
68
+ t = get_a_var(input)
69
+ if t is None:
70
+ with lock:
71
+ results[i] = ExceptionWrapper(
72
+ where=f"in replica {i}, no device was provided and no tensor input was found; "
73
+ "device cannot be resolved")
74
+ return
75
+ device = t.get_device()
76
+ if stream is None:
77
+ stream = torch.cuda.current_stream(device)
78
+ try:
79
+ with torch.cuda.device(device), torch.cuda.stream(stream), autocast(enabled=autocast_enabled):
80
+ # this also avoids accidental slicing of `input` if it is a Tensor
81
+ if not isinstance(input, (list, tuple)):
82
+ input = (input,)
83
+ output = module(*input, **kwargs)
84
+ with lock:
85
+ results[i] = output
86
+ except Exception:
87
+ with lock:
88
+ results[i] = ExceptionWrapper(
89
+ where=f"in replica {i} on device {device}")
90
+
91
+ if len(modules) > 1:
92
+ threads = [threading.Thread(target=_worker,
93
+ args=(i, module, input, kwargs, device, stream))
94
+ for i, (module, input, kwargs, device, stream) in
95
+ enumerate(zip(modules, inputs, kwargs_tup, devices, streams))]
96
+
97
+ for thread in threads:
98
+ thread.start()
99
+ for thread in threads:
100
+ thread.join()
101
+ else:
102
+ _worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0], streams[0])
103
+
104
+ outputs = []
105
+ for i in range(len(inputs)):
106
+ output = results[i]
107
+ if isinstance(output, ExceptionWrapper):
108
+ output.reraise()
109
+ outputs.append(output)
110
+ return outputs
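`parallel_apply` is normally reached through `DataParallel.forward`, but it can be driven directly together with `replicate`. A hedged sketch (not part of this commit; needs at least two CUDA devices):

    import torch
    import torch.nn as nn
    from torch.nn.parallel import parallel_apply, replicate

    if torch.cuda.is_available() and torch.cuda.device_count() >= 2:
        module = nn.Linear(8, 4).cuda(0)
        devices = [0, 1]
        replicas = replicate(module, devices)                        # one replica per device
        inputs = [torch.randn(2, 8, device=f"cuda:{d}") for d in devices]

        # Each (replica, input) pair runs in its own thread on its own device.
        outputs = parallel_apply(replicas, inputs, devices=devices)
        print([o.shape for o in outputs])                            # two torch.Size([2, 4])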
llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/replicate.py ADDED
@@ -0,0 +1,186 @@
1
+ import torch
2
+ from ..modules import Module
3
+ from . import comm
4
+ from typing import TYPE_CHECKING, Dict, Iterator, List, Optional, Sequence, Set, TypeVar, Union, cast
5
+ from torch._utils import _get_device_index
6
+
7
+ from collections import OrderedDict
8
+
9
+ if TYPE_CHECKING:
10
+ import torch.jit
11
+ import torch.jit._state
12
+
13
+ __all__ = ['replicate']
14
+
15
+ def _is_script_module(module: Module) -> bool:
16
+ import torch.jit
17
+ return isinstance(module, torch.jit.ScriptModule)
18
+
19
+
20
+ def _is_script_method(module: Module) -> bool:
21
+ import torch.jit
22
+ return isinstance(module, torch._C.ScriptMethod)
23
+
24
+
25
+ def _init_script_module() -> "torch.jit.ScriptModule":
26
+ import torch.jit
27
+ return torch.jit.ScriptModule()
28
+
29
+
30
+ def _is_jit_enabled() -> "torch.jit._state.EnabledProxy":
31
+ import torch.jit._state
32
+ return torch.jit._state._enabled
33
+
34
+
35
+ # Check if we can safely replicate the module.
36
+ # there are two types of module:
37
+ # 1. python modules
38
+ # 2. ScriptModule
39
+ #
40
+ # currently a module cannot be replicated properly if the descendants of
41
+ # any ScriptModule contains python module (type 1 above)
42
+ def _replicatable_module(module: Module, memo: Optional[Set[Module]] = None) -> bool:
43
+
44
+ # module.modules() contains module itself as the first element
45
+ def descendant_modules(module: Module) -> Iterator[Module]:
46
+ gen = module.modules()
47
+ next(gen)
48
+ return gen
49
+
50
+ if not _is_jit_enabled():
51
+ return True
52
+ if memo is None:
53
+ memo = set()
54
+
55
+ # memoize visited modules
56
+ memo.add(module)
57
+ if _is_script_module(module):
58
+ memo.update(descendant_modules(module))
59
+ return all(_is_script_module(descendant) for
60
+ descendant in descendant_modules(module))
61
+
62
+ for child in module.children():
63
+ # since any unreplicatable module will cause the check to return
64
+ # False early, visited modules here can be safely ignored.
65
+ if child in memo:
66
+ continue
67
+ if not _replicatable_module(child, memo):
68
+ return False
69
+
70
+ return True
71
+
72
+ def _broadcast_coalesced_reshape(
73
+ tensors: Sequence[torch.Tensor],
74
+ devices: Sequence[Union[int, torch.device]],
75
+ detach: bool = False,
76
+ ) -> List[List[torch.Tensor]]:
77
+ from ._functions import Broadcast
78
+ if detach:
79
+ return comm.broadcast_coalesced(tensors, devices)
80
+ else:
81
+ # Use the autograd function to broadcast if not detach
82
+ if len(tensors) > 0:
83
+ tensor_copies = Broadcast.apply(devices, *tensors)
84
+ return [tensor_copies[i:i + len(tensors)]
85
+ for i in range(0, len(tensor_copies), len(tensors))]
86
+ else:
87
+ return []
88
+
89
+
90
+ T = TypeVar("T", bound=Module)
91
+
92
+
93
+ def replicate(
94
+ network: T,
95
+ devices: Sequence[Union[int, torch.device]],
96
+ detach: bool = False,
97
+ ) -> List[T]:
98
+ if not _replicatable_module(network):
99
+ raise RuntimeError("Cannot replicate network where python modules are "
100
+ "childrens of ScriptModule")
101
+
102
+ if not devices:
103
+ return []
104
+
105
+ devices = [_get_device_index(x, True) for x in devices]
106
+ num_replicas = len(devices)
107
+
108
+ params = list(network.parameters())
109
+ param_indices = {param: idx for idx, param in enumerate(params)}
110
+ param_copies = _broadcast_coalesced_reshape(params, devices, detach)
111
+
112
+ buffers = list(network.buffers())
113
+ buffers_rg: List[torch.Tensor] = []
114
+ buffers_not_rg: List[torch.Tensor] = []
115
+ for buf in buffers:
116
+ if buf.requires_grad and not detach:
117
+ buffers_rg.append(buf)
118
+ else:
119
+ buffers_not_rg.append(buf)
120
+
121
+ buffer_indices_rg = {buf: idx for idx, buf in enumerate(buffers_rg)}
122
+ buffer_indices_not_rg = {buf: idx for idx, buf in enumerate(buffers_not_rg)}
123
+
124
+ buffer_copies_rg = _broadcast_coalesced_reshape(buffers_rg, devices, detach=detach)
125
+ buffer_copies_not_rg = _broadcast_coalesced_reshape(buffers_not_rg, devices, detach=True)
126
+
127
+ modules = list(network.modules())
128
+ module_copies: List[List[Module]] = [[] for _ in devices]
129
+ module_indices: Dict[Module, int] = {}
130
+
131
+ for i, module in enumerate(modules):
132
+ module_indices[module] = i
133
+ for j in range(num_replicas):
134
+ replica = module._replicate_for_data_parallel()
135
+ # This is a temporary fix for DDP. DDP needs to access the
136
+ # replicated model parameters. It used to do so through
137
+ # `mode.parameters()`. The fix added in #33907 for DP stops the
138
+ # `parameters()` API from exposing the replicated parameters.
139
+ # Hence, we add a `_former_parameters` dict here to support DDP.
140
+ replica._former_parameters = OrderedDict()
141
+
142
+ module_copies[j].append(replica)
143
+
144
+ for i, module in enumerate(modules):
145
+ for key, child in module._modules.items():
146
+ if child is None:
147
+ for j in range(num_replicas):
148
+ replica = module_copies[j][i]
149
+ replica._modules[key] = None
150
+ else:
151
+ module_idx = module_indices[child]
152
+ for j in range(num_replicas):
153
+ replica = module_copies[j][i]
154
+ setattr(replica, key, module_copies[j][module_idx])
155
+ for key, param in module._parameters.items():
156
+ if param is None:
157
+ for j in range(num_replicas):
158
+ replica = module_copies[j][i]
159
+ replica._parameters[key] = None
160
+ else:
161
+ param_idx = param_indices[param]
162
+ for j in range(num_replicas):
163
+ replica = module_copies[j][i]
164
+ param_copy = param_copies[j][param_idx]
165
+ # parameters in replicas are no longer leaves,
166
+ # so setattr them as non-parameter attributes
167
+ setattr(replica, key, param_copy)
168
+ # expose the parameter for DDP
169
+ replica._former_parameters[key] = param_copy
170
+ for key, buf in module._buffers.items(): # type: ignore[assignment]
171
+ if buf is None:
172
+ for j in range(num_replicas):
173
+ replica = module_copies[j][i]
174
+ replica._buffers[key] = None
175
+ else:
176
+ if buf.requires_grad and not detach:
177
+ buffer_copies = buffer_copies_rg
178
+ buffer_idx = buffer_indices_rg[buf]
179
+ else:
180
+ buffer_copies = buffer_copies_not_rg
181
+ buffer_idx = buffer_indices_not_rg[buf]
182
+ for j in range(num_replicas):
183
+ replica = module_copies[j][i]
184
+ setattr(replica, key, buffer_copies[j][buffer_idx])
185
+
186
+ return [cast(T, module_copies[j][0]) for j in range(num_replicas)]
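A hedged sketch of the non-detached path in `replicate` above: because `Broadcast.apply` is an autograd function, a backward pass through any replica reaches the source module's parameters. This assumes two CUDA devices are available:

import torch
from torch.nn.parallel import replicate

model = torch.nn.Linear(4, 2).cuda(0)
replicas = replicate(model, [0, 1], detach=False)    # detach=False keeps the autograd link
out = replicas[1](torch.randn(8, 4, device="cuda:1"))
out.sum().backward()
# Replica parameters are non-leaf copies exposed via setattr/_former_parameters,
# so gradients accumulate on the original module's parameters.
print(model.weight.grad.shape)                       # torch.Size([2, 4])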
llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/scatter_gather.py ADDED
@@ -0,0 +1,107 @@
+ import torch
+ from typing import Any, Dict, List, Optional, Sequence, Tuple, TypeVar, Union, overload
+ from ._functions import Scatter, Gather
+ import warnings
+
+ __all__ = ['scatter', 'scatter_kwargs', 'gather']
+
+ def is_namedtuple(obj: Any) -> bool:
+     # Check if type was created from collections.namedtuple or a typing.NamedTuple.
+     warnings.warn("is_namedtuple is deprecated, please use the python checks instead")
+     return _is_namedtuple(obj)
+
+ def _is_namedtuple(obj: Any) -> bool:
+     # Check if type was created from collections.namedtuple or a typing.NamedTuple.
+     return (
+         isinstance(obj, tuple) and hasattr(obj, "_asdict") and hasattr(obj, "_fields")
+     )
+
+
+ T = TypeVar("T", dict, list, tuple)
+
+ # For some reason, 'scatter' returns a tuple when given a single Tensor input but a list otherwise.
+ @overload
+ def scatter(
+     inputs: torch.Tensor,
+     target_gpus: Sequence[Union[int, torch.device]],
+     dim: int = ...,
+ ) -> Tuple[torch.Tensor, ...]:
+     ...
+
+ @overload
+ def scatter(inputs: T, target_gpus: Sequence[Union[int, torch.device]], dim: int = ...) -> List[T]:
+     ...
+
+ def scatter(inputs, target_gpus, dim=0):
+     r"""Slice tensors into approximately equal chunks and distribute them across given GPUs.
+
+     Duplicates references to objects that are not tensors.
+     """
+     def scatter_map(obj):
+         if isinstance(obj, torch.Tensor):
+             return Scatter.apply(target_gpus, None, dim, obj)
+         if _is_namedtuple(obj):
+             return [type(obj)(*args) for args in zip(*map(scatter_map, obj))]
+         if isinstance(obj, tuple) and len(obj) > 0:
+             return list(zip(*map(scatter_map, obj)))
+         if isinstance(obj, list) and len(obj) > 0:
+             return [list(i) for i in zip(*map(scatter_map, obj))]
+         if isinstance(obj, dict) and len(obj) > 0:
+             return [type(obj)(i) for i in zip(*map(scatter_map, obj.items()))]
+         return [obj for _ in target_gpus]
+
+     # After scatter_map is called, a scatter_map cell will exist. This cell
+     # has a reference to the actual function scatter_map, which has references
+     # to a closure that has a reference to the scatter_map cell (because the
+     # fn is recursive). To avoid this reference cycle, we set the function to
+     # None, clearing the cell
+     try:
+         res = scatter_map(inputs)
+     finally:
+         scatter_map = None  # type: ignore[assignment]
+     return res
+
+
+ def scatter_kwargs(
+     inputs: Tuple[Any, ...],
+     kwargs: Optional[Dict[str, Any]],
+     target_gpus: Sequence[Union[int, torch.device]],
+     dim: int = 0,
+ ) -> Tuple[Tuple[Any, ...], Tuple[Dict[str, Any], ...]]:
+     r"""Scatter with support for kwargs dictionary."""
+     scattered_inputs = scatter(inputs, target_gpus, dim) if inputs else []
+     scattered_kwargs = scatter(kwargs, target_gpus, dim) if kwargs else []
+     if len(scattered_inputs) < len(scattered_kwargs):
+         scattered_inputs.extend(() for _ in range(len(scattered_kwargs) - len(scattered_inputs)))
+     elif len(scattered_kwargs) < len(inputs):
+         scattered_kwargs.extend({} for _ in range(len(scattered_inputs) - len(scattered_kwargs)))
+     return tuple(scattered_inputs), tuple(scattered_kwargs)
+
+
+ def gather(outputs: Any, target_device: Union[int, torch.device], dim: int = 0) -> Any:
+     r"""Gather tensors from different GPUs on a specified device.
+
+     Use 'cpu' for CPU to avoid a deprecation warning.
+     """
+     def gather_map(outputs):
+         out = outputs[0]
+         if isinstance(out, torch.Tensor):
+             return Gather.apply(target_device, dim, *outputs)
+         if out is None:
+             return None
+         if isinstance(out, dict):
+             if not all(len(out) == len(d) for d in outputs):
+                 raise ValueError('All dicts must have the same number of keys')
+             return type(out)((k, gather_map([d[k] for d in outputs]))
+                              for k in out)
+         if _is_namedtuple(out):
+             return type(out)._make(map(gather_map, zip(*outputs)))
+         return type(out)(map(gather_map, zip(*outputs)))
+
+     # Recursive function calls like this create reference cycles.
+     # Setting the function to None clears the refcycle.
+     try:
+         res = gather_map(outputs)
+     finally:
+         gather_map = None  # type: ignore[assignment]
+     return res
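A short sketch of the scatter/gather round trip implemented above, assuming two CUDA devices: tensor inputs are split along `dim`, while non-tensor values are simply duplicated per device.

import torch
from torch.nn.parallel.scatter_gather import scatter_kwargs, gather

batch = torch.arange(8., device="cuda:0").reshape(8, 1)
inputs, kwargs = scatter_kwargs((batch,), {"scale": 2.0}, target_gpus=[0, 1], dim=0)
# inputs[0][0] holds rows 0-3 on cuda:0, inputs[1][0] rows 4-7 on cuda:1;
# the float kwarg is replicated into each per-device kwargs dict.
chunks = [inp[0] * kw["scale"] for inp, kw in zip(inputs, kwargs)]
merged = gather(chunks, target_device=0, dim=0)      # concatenated back on cuda:0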
llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/__init__.py ADDED
@@ -0,0 +1 @@
+ from .modules import *  # noqa: F403
llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (217 Bytes).
llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/__init__.py ADDED
@@ -0,0 +1,9 @@
+ from torch.ao.nn.quantizable.modules.activation import MultiheadAttention
+ from torch.ao.nn.quantizable.modules.rnn import LSTM
+ from torch.ao.nn.quantizable.modules.rnn import LSTMCell
+
+ __all__ = [
+     'LSTM',
+     'LSTMCell',
+     'MultiheadAttention',
+ ]
llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (417 Bytes).
llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/activation.cpython-310.pyc ADDED
Binary file (644 Bytes).
llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/rnn.cpython-310.pyc ADDED
Binary file (647 Bytes).
llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/activation.py ADDED
@@ -0,0 +1,10 @@
+ # flake8: noqa: F401
+ r"""Quantizable Modules.
+
+ This file is in the process of migration to `torch/ao/nn/quantizable`, and
+ is kept here for compatibility while the migration process is ongoing.
+ If you are adding a new entry/functionality, please, add it to the
+ appropriate file under the `torch/ao/nn/quantizable/modules`,
+ while adding an import statement here.
+ """
+ from torch.ao.nn.quantizable.modules.activation import MultiheadAttention
llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/rnn.py ADDED
@@ -0,0 +1,11 @@
+ # flake8: noqa: F401
+ r"""Quantizable Modules.
+
+ This file is in the process of migration to `torch/ao/nn/quantizable`, and
+ is kept here for compatibility while the migration process is ongoing.
+ If you are adding a new entry/functionality, please, add it to the
+ appropriate file under the `torch/ao/nn/quantizable/modules`,
+ while adding an import statement here.
+ """
+ from torch.ao.nn.quantizable.modules.rnn import LSTM
+ from torch.ao.nn.quantizable.modules.rnn import LSTMCell
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/__init__.py ADDED
@@ -0,0 +1,40 @@
+ from . import dynamic  # noqa: F403
+ from . import functional  # noqa: F403
+ from . import modules  # noqa: F403
+ from .modules import *  # noqa: F403
+ from .modules import MaxPool2d
+
+ __all__ = [
+     'BatchNorm2d',
+     'BatchNorm3d',
+     'Conv1d',
+     'Conv2d',
+     'Conv3d',
+     'ConvTranspose1d',
+     'ConvTranspose2d',
+     'ConvTranspose3d',
+     'DeQuantize',
+     'Dropout',
+     'ELU',
+     'Embedding',
+     'EmbeddingBag',
+     'GroupNorm',
+     'Hardswish',
+     'InstanceNorm1d',
+     'InstanceNorm2d',
+     'InstanceNorm3d',
+     'LayerNorm',
+     'LeakyReLU',
+     'Linear',
+     'LSTM',
+     'MultiheadAttention',
+     'PReLU',
+     'Quantize',
+     'ReLU6',
+     'Sigmoid',
+     'Softmax',
+     # Wrapper modules
+     'FloatFunctional',
+     'FXFloatFunctional',
+     'QFunctional',
+ ]
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (723 Bytes).
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/__pycache__/functional.cpython-310.pyc ADDED
Binary file (460 Bytes).
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/__init__.py ADDED
@@ -0,0 +1 @@
+ from .modules import *  # noqa: F403
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (226 Bytes).
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__init__.py ADDED
@@ -0,0 +1,31 @@
+ # flake8: noqa: F401
+ r"""Quantized Reference Modules.
+
+ This module is in the process of migration to
+ `torch/ao/nn/quantized/reference`, and is kept here for
+ compatibility while the migration process is ongoing.
+ If you are adding a new entry/functionality, please, add it to the
+ appropriate file under the `torch/ao/nn/quantized/reference`,
+ while adding an import statement here.
+ """
+
+ from torch.ao.nn.quantized.reference.modules.linear import Linear
+ from torch.ao.nn.quantized.reference.modules.conv import Conv1d, Conv2d, Conv3d, ConvTranspose1d, ConvTranspose2d, ConvTranspose3d
+ from torch.ao.nn.quantized.reference.modules.rnn import RNNCell, LSTMCell, GRUCell, LSTM
+ from torch.ao.nn.quantized.reference.modules.sparse import Embedding, EmbeddingBag
+
+ __all__ = [
+     'Linear',
+     'Conv1d',
+     'Conv2d',
+     'Conv3d',
+     'ConvTranspose1d',
+     'ConvTranspose2d',
+     'ConvTranspose3d',
+     'RNNCell',
+     'LSTMCell',
+     'GRUCell',
+     'LSTM',
+     'Embedding',
+     'EmbeddingBag',
+ ]
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.16 kB).
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/conv.cpython-310.pyc ADDED
Binary file (896 Bytes).
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/linear.cpython-310.pyc ADDED
Binary file (659 Bytes).
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/rnn.cpython-310.pyc ADDED
Binary file (806 Bytes).
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/sparse.cpython-310.pyc ADDED
Binary file (697 Bytes).
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/utils.cpython-310.pyc ADDED
Binary file (857 Bytes).
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/conv.py ADDED
@@ -0,0 +1,19 @@
+ # flake8: noqa: F401
+ r"""Quantized Reference Modules.
+
+ This module is in the process of migration to
+ `torch/ao/nn/quantized/reference`, and is kept here for
+ compatibility while the migration process is ongoing.
+ If you are adding a new entry/functionality, please, add it to the
+ appropriate file under the `torch/ao/nn/quantized/reference`,
+ while adding an import statement here.
+ """
+
+ from torch.ao.nn.quantized.reference.modules.conv import _ConvNd
+ from torch.ao.nn.quantized.reference.modules.conv import Conv1d
+ from torch.ao.nn.quantized.reference.modules.conv import Conv2d
+ from torch.ao.nn.quantized.reference.modules.conv import Conv3d
+ from torch.ao.nn.quantized.reference.modules.conv import _ConvTransposeNd
+ from torch.ao.nn.quantized.reference.modules.conv import ConvTranspose1d
+ from torch.ao.nn.quantized.reference.modules.conv import ConvTranspose2d
+ from torch.ao.nn.quantized.reference.modules.conv import ConvTranspose3d
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/linear.py ADDED
@@ -0,0 +1,12 @@
+ # flake8: noqa: F401
+ r"""Quantized Reference Modules.
+
+ This module is in the process of migration to
+ `torch/ao/nn/quantized/reference`, and is kept here for
+ compatibility while the migration process is ongoing.
+ If you are adding a new entry/functionality, please, add it to the
+ appropriate file under the `torch/ao/nn/quantized/reference`,
+ while adding an import statement here.
+ """
+
+ from torch.ao.nn.quantized.reference.modules.linear import Linear
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/rnn.py ADDED
@@ -0,0 +1,17 @@
+ # flake8: noqa: F401
+ r"""Quantized Reference Modules.
+
+ This module is in the process of migration to
+ `torch/ao/nn/quantized/reference`, and is kept here for
+ compatibility while the migration process is ongoing.
+ If you are adding a new entry/functionality, please, add it to the
+ appropriate file under the `torch/ao/nn/quantized/reference`,
+ while adding an import statement here.
+ """
+
+ from torch.ao.nn.quantized.reference.modules.rnn import RNNCellBase
+ from torch.ao.nn.quantized.reference.modules.rnn import RNNCell
+ from torch.ao.nn.quantized.reference.modules.rnn import LSTMCell
+ from torch.ao.nn.quantized.reference.modules.rnn import GRUCell
+ from torch.ao.nn.quantized.reference.modules.rnn import RNNBase
+ from torch.ao.nn.quantized.reference.modules.rnn import LSTM
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/sparse.py ADDED
@@ -0,0 +1,13 @@
+ # flake8: noqa: F401
+ r"""Quantized Reference Modules.
+
+ This module is in the process of migration to
+ `torch/ao/nn/quantized/reference`, and is kept here for
+ compatibility while the migration process is ongoing.
+ If you are adding a new entry/functionality, please, add it to the
+ appropriate file under the `torch/ao/nn/quantized/reference`,
+ while adding an import statement here.
+ """
+
+ from torch.ao.nn.quantized.reference.modules.sparse import Embedding
+ from torch.ao.nn.quantized.reference.modules.sparse import EmbeddingBag
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/utils.py ADDED
@@ -0,0 +1,15 @@
+ # flake8: noqa: F401
+ r"""Quantized Reference Modules.
+
+ This module is in the process of migration to
+ `torch/ao/nn/quantized/reference`, and is kept here for
+ compatibility while the migration process is ongoing.
+ If you are adding a new entry/functionality, please, add it to the
+ appropriate file under the `torch/ao/nn/quantized/reference`,
+ while adding an import statement here.
+ """
+ from torch.ao.nn.quantized.reference.modules.utils import _quantize_weight
+ from torch.ao.nn.quantized.reference.modules.utils import _quantize_and_dequantize_weight
+ from torch.ao.nn.quantized.reference.modules.utils import _save_weight_qparams
+ from torch.ao.nn.quantized.reference.modules.utils import _get_weight_qparam_keys
+ from torch.ao.nn.quantized.reference.modules.utils import ReferenceQuantizedModule
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/__init__.py ADDED
@@ -0,0 +1 @@
+ from torch.ao.nn.quantized.dynamic import *  # noqa: F403
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (245 Bytes).
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/linear.py ADDED
@@ -0,0 +1,10 @@
+ # flake8: noqa: F401
+ r"""Quantized Dynamic Modules.
+
+ This file is in the process of migration to `torch/ao/nn/quantized/dynamic`,
+ and is kept here for compatibility while the migration process is ongoing.
+ If you are adding a new entry/functionality, please, add it to the
+ appropriate file under the `torch/ao/nn/quantized/dynamic/modules`,
+ while adding an import statement here.
+ """
+ from torch.ao.nn.quantized.dynamic.modules.linear import Linear
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/functional.py ADDED
@@ -0,0 +1,10 @@
+ r"""nn.quantized.functional.
+
+ Quantized equivalents of the `nn.functional`.
+
+ Note::
+     This location is in the process of being deprecated.
+     Please, use the `torch.ao.nn.quantized.functional` instead.
+ """
+
+ from torch.ao.nn.quantized.functional import *  # noqa: F401,F403
llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/activation.cpython-310.pyc ADDED
Binary file (840 Bytes).
llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__init__.py ADDED
@@ -0,0 +1,32 @@
+ from . import rnn
+ from .clip_grad import clip_grad_norm, clip_grad_norm_, clip_grad_value_
+ from .weight_norm import weight_norm, remove_weight_norm
+ from .convert_parameters import parameters_to_vector, vector_to_parameters
+ from .spectral_norm import spectral_norm, remove_spectral_norm
+ from .fusion import fuse_conv_bn_eval, fuse_conv_bn_weights, fuse_linear_bn_eval, fuse_linear_bn_weights
+ from .memory_format import convert_conv2d_weight_memory_format, convert_conv3d_weight_memory_format
+ from . import parametrizations
+ from .init import skip_init
+ from . import stateless
+
+ __all__ = [
+     "clip_grad_norm",
+     "clip_grad_norm_",
+     "clip_grad_value_",
+     "convert_conv2d_weight_memory_format",
+     "convert_conv3d_weight_memory_format",
+     "fuse_conv_bn_eval",
+     "fuse_conv_bn_weights",
+     "fuse_linear_bn_eval",
+     "fuse_linear_bn_weights",
+     "parameters_to_vector",
+     "parametrizations",
+     "remove_spectral_norm",
+     "remove_weight_norm",
+     "rnn",
+     "skip_init",
+     "spectral_norm",
+     "stateless",
+     "vector_to_parameters",
+     "weight_norm",
+ ]
llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_deprecation_utils.py ADDED
@@ -0,0 +1,45 @@
+ from typing import List, Callable
+ import importlib
+ import warnings
+
+
+ _MESSAGE_TEMPLATE = r"Usage of '{old_location}' is deprecated; please use '{new_location}' instead."
+
+ def lazy_deprecated_import(all: List[str], old_module: str, new_module: str) -> Callable:
+     r"""Import utility to lazily import deprecated packages / modules / functional.
+
+     The old_module and new_module are also used in the deprecation warning defined
+     by the `_MESSAGE_TEMPLATE`.
+
+     Args:
+         all: The list of the functions that are imported. Generally, the module's
+             `__all__` list.
+         old_module: Old module location
+         new_module: New module location / Migrated location
+
+     Returns:
+         Callable to assign to the `__getattr__`
+
+     Usage:
+
+         # In the `torch/nn/quantized/functional.py`
+         from torch.nn.utils._deprecation_utils import lazy_deprecated_import
+         _MIGRATED_TO = "torch.ao.nn.quantized.functional"
+         __getattr__ = lazy_deprecated_import(
+             all=__all__,
+             old_module=__name__,
+             new_module=_MIGRATED_TO)
+     """
+     warning_message = _MESSAGE_TEMPLATE.format(
+         old_location=old_module,
+         new_location=new_module)
+
+     def getattr_dunder(name):
+         if name in all:
+             # We are using the "RuntimeWarning" to make sure it is not
+             # ignored by default.
+             warnings.warn(warning_message, RuntimeWarning)
+             package = importlib.import_module(new_module)
+             return getattr(package, name)
+         raise AttributeError(f"Module {new_module!r} has no attribute {name!r}.")
+     return getattr_dunder
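Since the docstring above only shows the provider side, here is a hedged, self-contained sketch of what the returned `__getattr__` does at lookup time; the module names below are made up for illustration and are not part of torch:

import sys, types, warnings
from torch.nn.utils._deprecation_utils import lazy_deprecated_import

new_mod = types.ModuleType("demo_new_location")      # stand-in for the migrated module
new_mod.foo = lambda: "hello"
sys.modules["demo_new_location"] = new_mod

__getattr__ = lazy_deprecated_import(
    all=["foo"], old_module="demo_old_location", new_module="demo_new_location")

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    print(__getattr__("foo")())                      # "hello", resolved from the new module
    print(caught[0].category is RuntimeWarning)      # True: RuntimeWarning is not silenced by default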
llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__init__.py ADDED
@@ -0,0 +1,9 @@
+ from .conv_expanded_weights import ConvPerSampleGrad
+ from .embedding_expanded_weights import EmbeddingPerSampleGrad
+ from .group_norm_expanded_weights import GroupNormPerSampleGrad
+ from .instance_norm_expanded_weights import InstanceNormPerSampleGrad
+ from .layer_norm_expanded_weights import LayerNormPerSampleGrad
+ from .linear_expanded_weights import LinearPerSampleGrad
+ from .expanded_weights_impl import ExpandedWeight
+
+ __all__ = ['ExpandedWeight']
llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/conv_expanded_weights.cpython-310.pyc ADDED
Binary file (2.09 kB).
llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/embedding_expanded_weights.cpython-310.pyc ADDED
Binary file (2.27 kB).