applied-ai-018 committed (verified)
Commit 86a9718 · 1 Parent(s): 7e985af

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50):
  1. ckpts/universal/global_step120/zero/12.input_layernorm.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step120/zero/12.input_layernorm.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step120/zero/12.input_layernorm.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step120/zero/14.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  5. ckpts/universal/global_step120/zero/14.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  6. ckpts/universal/global_step120/zero/14.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  7. ckpts/universal/global_step120/zero/14.post_attention_layernorm.weight/exp_avg.pt +3 -0
  8. ckpts/universal/global_step120/zero/14.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  9. ckpts/universal/global_step120/zero/14.post_attention_layernorm.weight/fp32.pt +3 -0
  10. ckpts/universal/global_step120/zero/19.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  11. ckpts/universal/global_step120/zero/19.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  12. ckpts/universal/global_step120/zero/19.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  13. ckpts/universal/global_step120/zero/21.attention.dense.weight/exp_avg.pt +3 -0
  14. ckpts/universal/global_step120/zero/23.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  15. ckpts/universal/global_step120/zero/23.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  16. ckpts/universal/global_step120/zero/23.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  17. ckpts/universal/global_step120/zero/24.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  18. ckpts/universal/global_step120/zero/24.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  19. ckpts/universal/global_step120/zero/24.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  20. ckpts/universal/global_step120/zero/25.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  21. ckpts/universal/global_step120/zero/25.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  22. ckpts/universal/global_step120/zero/25.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  23. ckpts/universal/global_step120/zero/26.post_attention_layernorm.weight/exp_avg.pt +3 -0
  24. ckpts/universal/global_step120/zero/26.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  25. ckpts/universal/global_step120/zero/26.post_attention_layernorm.weight/fp32.pt +3 -0
  26. ckpts/universal/global_step120/zero/8.attention.dense.weight/exp_avg.pt +3 -0
  27. ckpts/universal/global_step120/zero/8.attention.dense.weight/exp_avg_sq.pt +3 -0
  28. ckpts/universal/global_step120/zero/8.attention.dense.weight/fp32.pt +3 -0
  29. ckpts/universal/global_step120/zero/8.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  30. ckpts/universal/global_step120/zero/8.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  31. ckpts/universal/global_step120/zero/8.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  32. ckpts/universal/global_step120/zero/9.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  33. ckpts/universal/global_step120/zero/9.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  34. ckpts/universal/global_step120/zero/9.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  35. venv/lib/python3.10/site-packages/torch/_export/db/examples/autograd_function.py +26 -0
  36. venv/lib/python3.10/site-packages/torch/_export/db/examples/cond_closed_over_variable.py +23 -0
  37. venv/lib/python3.10/site-packages/torch/_export/db/examples/constrain_as_value_example.py +30 -0
  38. venv/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_round.py +24 -0
  39. venv/lib/python3.10/site-packages/torch/_export/db/examples/list_contains.py +21 -0
  40. venv/lib/python3.10/site-packages/torch/_export/db/examples/null_context_manager.py +26 -0
  41. venv/lib/python3.10/site-packages/torch/_export/db/examples/optional_input.py +19 -0
  42. venv/lib/python3.10/site-packages/torch/_export/db/examples/specialized_attribute.py +29 -0
  43. venv/lib/python3.10/site-packages/torch/_export/db/examples/tensor_setattr.py +17 -0
  44. venv/lib/python3.10/site-packages/torch/_export/db/examples/type_reflection_method.py +41 -0
  45. venv/lib/python3.10/site-packages/torch/_higher_order_ops/__init__.py +1 -0
  46. venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/auto_functionalize.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/cond.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/triton_kernel_wrap.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/utils.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/torch/_higher_order_ops/auto_functionalize.py +261 -0
ckpts/universal/global_step120/zero/12.input_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f05b11c06efbb6011e46324aeff91d6969c207cfbb4d3f538988ba16202597d
+ size 9372
ckpts/universal/global_step120/zero/12.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7acaaa34c1c1a2fa21ffeba8bb02a1e5e3c53df61fec46e5e720bc2503086377
+ size 9387
ckpts/universal/global_step120/zero/12.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5440dd1c78e8a135fde05c1aafdfc6ca2addb712ea8ba21ea6775829fec29d71
+ size 9293
ckpts/universal/global_step120/zero/14.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:44799c5eca0f7b814a36ada8a2a985441812e58a613f68fbf4a527c11bf0f9da
+ size 33555612
ckpts/universal/global_step120/zero/14.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d4ff2ab006f3da15b9bfe65118d2106bd3d58e3948524bd1543866e56ef9957
+ size 33555627
ckpts/universal/global_step120/zero/14.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3108e7a90cec5a3472de3afd1e14002a69023393be351abf71a3efbcd21582c1
+ size 33555533
ckpts/universal/global_step120/zero/14.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7b67e13b1af7d8d5c6d522c93188eb1b59b42d6562bc2eb65983c44e8f96a127
+ size 9372
ckpts/universal/global_step120/zero/14.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:22fcdc17c849cd6418aae8ea5104e5c4fa7e7741611e1946c5644b2a5dcfad48
+ size 9387
ckpts/universal/global_step120/zero/14.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:98d25990e3302cd65f6895327519b14c5fceb54e98ba62d0c1bff59dd9a53d10
+ size 9293
ckpts/universal/global_step120/zero/19.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b21979a3ecb173071fbb2f4162e2729cd7d7c076edf8738243484e51c01ee749
+ size 33555612
ckpts/universal/global_step120/zero/19.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cc1efd776071f65554aacd4cb630512eab6db181ec866f208bbd1f699d90d303
+ size 33555627
ckpts/universal/global_step120/zero/19.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8e436534b1d579979b5dc8db187e435a1fea6fc1713ca7dd5b8e412a90a4723e
+ size 33555533
ckpts/universal/global_step120/zero/21.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6e2d7fd20d792282caae21ed4736b2acfcfa2641a4d52061cd2d59b7818a484
+ size 16778396
ckpts/universal/global_step120/zero/23.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d25a4818c7340186511af210b151ebe56995bd46863faea70e4c5a923fc21386
+ size 33555612
ckpts/universal/global_step120/zero/23.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33a92271a223d91e65ca0abe5a87b9b03f404d0a4ab1d8895d05084be74421c6
+ size 33555627
ckpts/universal/global_step120/zero/23.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05c9f973e08ceaedabdf8dc136cfd9579b6dfeca3d437125d515cbd1c91d15cb
+ size 33555533
ckpts/universal/global_step120/zero/24.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:41200e7c76d1d8d858393328284a376692c2ebdc13db3055a2ce3fa15723c866
+ size 33555612
ckpts/universal/global_step120/zero/24.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f66caeb5b4e74302196d38db5cd119b791be8b6d1811ad3099489c5c9f3b5d46
+ size 33555627
ckpts/universal/global_step120/zero/24.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82b02754a37e50b45da0958d132c2b74fb1c7d56bbaca856ac59800b72a7194c
+ size 33555533
ckpts/universal/global_step120/zero/25.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b52420175eca60bb5df5d9e84e2bbf745fb9a25a992fe84d0f7eefcd6ad7ae8e
+ size 33555612
ckpts/universal/global_step120/zero/25.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d79a52574022fc5d55a24ca3ba3164f445138534a2203641d17d34fad714a0ef
+ size 33555627
ckpts/universal/global_step120/zero/25.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:acd0074ae34fd37c15d1b698532bdc2101a5d5949484b38a55573c3f7a00f4d9
+ size 33555533
ckpts/universal/global_step120/zero/26.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9acd645cb2197f7697afb3eade5575c1e41c0fd53f2bb5449d04c0a9ebafd8e0
+ size 9372
ckpts/universal/global_step120/zero/26.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:56dc8af36b686f4bd61b94bf34ab79073c751c5f1f3939c9b985246c445e4062
+ size 9387
ckpts/universal/global_step120/zero/26.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dd2df67d75555ac4678a88f301d379694bbafbb8ee6ffc6a47acc1f43827f063
+ size 9293
ckpts/universal/global_step120/zero/8.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9474660312ed211cad82f8dcb9fda0c509688ebc5c56192005f0d55cab49621f
+ size 16778396
ckpts/universal/global_step120/zero/8.attention.dense.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:162c890f9519502cdab7a9f1f7a770cbaba48bb56d2baa58bd5b682ba3898fa5
+ size 16778411
ckpts/universal/global_step120/zero/8.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:60c4e65e4bceb984b2965f7e6cfc0ed6b59369efbe4bc7b9aa2b0ec63aff7506
+ size 16778317
ckpts/universal/global_step120/zero/8.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ea3507607b2c129631b16b1a49d6699d411f01dcc57d598f117e53f4967efb4
+ size 33555612
ckpts/universal/global_step120/zero/8.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2fd36949e611fd74b5fbfcd4f0c6bcd04fd97f37ab3ac449a94827e7e5b2a87e
+ size 33555627
ckpts/universal/global_step120/zero/8.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:846a3b0c05469d767c79a0e0edb03ce465bf10046b8a0651f2f6a366339a0a51
+ size 33555533
ckpts/universal/global_step120/zero/9.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd8e20433c17f091607c9862366643b5085e338b08d0a411b7d94da49e389dd3
+ size 33555612
ckpts/universal/global_step120/zero/9.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c877cbf4a06255c7db9d34dc5ec8a425822ad1a94fee46f54ab9e51cca3461e0
+ size 33555627
ckpts/universal/global_step120/zero/9.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d26c26cf100853a6b3cfc71917f8dab485919dc440cdbf2ee3eb6ef9ec634ee2
+ size 33555533
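Note: the ckpts entries above are Git LFS pointer stubs (a version line, a sha256 oid, and a byte size) rather than the optimizer tensors themselves; the payloads are fetched by git-lfs on checkout. A rough sketch only, assuming the repository is cloned without LFS smudging so the .pt paths still hold pointer text (the parse_lfs_pointer helper is illustrative and not part of this commit):

from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    """Split a Git LFS pointer file into its key/value fields (version, oid, size)."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        if key:
            fields[key] = value
    return fields

# Hypothetical usage against one of the pointer files listed above.
ptr = parse_lfs_pointer(
    "ckpts/universal/global_step120/zero/12.input_layernorm.weight/exp_avg.pt"
)
print(ptr["oid"], int(ptr["size"]))  # e.g. sha256:9f05b1... 9372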
venv/lib/python3.10/site-packages/torch/_export/db/examples/autograd_function.py ADDED
@@ -0,0 +1,26 @@
+ import torch
+
+ from torch._export.db.case import export_case
+
+
+ class MyAutogradFunction(torch.autograd.Function):
+     @staticmethod
+     def forward(ctx, x):
+         return x.clone()
+
+     @staticmethod
+     def backward(ctx, grad_output):
+         return grad_output + 1
+
+
+ @export_case(
+     example_inputs=(torch.randn(3, 2),),
+ )
+ class AutogradFunction(torch.nn.Module):
+     """
+     TorchDynamo does not keep track of backward() on autograd functions. We recommend to
+     use `allow_in_graph` to mitigate this problem.
+     """
+
+     def forward(self, x):
+         return MyAutogradFunction.apply(x)
venv/lib/python3.10/site-packages/torch/_export/db/examples/cond_closed_over_variable.py ADDED
@@ -0,0 +1,23 @@
+ import torch
+
+ from torch._export.db.case import export_case
+ from functorch.experimental.control_flow import cond
+
+
+ @export_case(
+     example_inputs=(torch.tensor(True), torch.ones(3, 2)),
+     tags={"torch.cond", "python.closure"},
+ )
+ class CondClosedOverVariable(torch.nn.Module):
+     """
+     torch.cond() supports branches closed over arbitrary variables.
+     """
+
+     def forward(self, pred, x):
+         def true_fn(val):
+             return x * 2
+
+         def false_fn(val):
+             return x - 2
+
+         return cond(pred, true_fn, false_fn, [x + 1])
venv/lib/python3.10/site-packages/torch/_export/db/examples/constrain_as_value_example.py ADDED
@@ -0,0 +1,30 @@
+ import torch
+
+ from torch._export.db.case import export_case
+
+
+ @export_case(
+     example_inputs=(torch.tensor(4), torch.randn(5, 5)),
+     tags={
+         "torch.dynamic-value",
+         "torch.escape-hatch",
+     },
+ )
+ class ConstrainAsValueExample(torch.nn.Module):
+     """
+     If the value is not known at tracing time, you can provide hint so that we
+     can trace further. Please look at constrain_as_value and constrain_as_size APIs.
+     constrain_as_value is used for values that don't need to be used for constructing
+     tensor.
+     """
+
+     def __init__(self):
+         super().__init__()
+
+     def forward(self, x, y):
+         a = x.item()
+         torch._constrain_as_value(a, min=0, max=5)
+
+         if a < 6:
+             return y.sin()
+         return y.cos()
venv/lib/python3.10/site-packages/torch/_export/db/examples/dynamic_shape_round.py ADDED
@@ -0,0 +1,24 @@
+ import torch
+
+ from torch._export.db.case import export_case, SupportLevel
+ from torch.export import Dim
+
+ x = torch.ones(3, 2)
+ dim0_x = Dim("dim0_x")
+
+ @export_case(
+     example_inputs=(x,),
+     tags={"torch.dynamic-shape", "python.builtin"},
+     support_level=SupportLevel.NOT_SUPPORTED_YET,
+     dynamic_shapes={"x": {0: dim0_x}},
+ )
+ class DynamicShapeRound(torch.nn.Module):
+     """
+     Calling round on dynamic shapes is not supported.
+     """
+
+     def __init__(self):
+         super().__init__()
+
+     def forward(self, x):
+         return x[: round(x.shape[0] / 2)]
venv/lib/python3.10/site-packages/torch/_export/db/examples/list_contains.py ADDED
@@ -0,0 +1,21 @@
+ import torch
+
+ from torch._export.db.case import export_case
+
+
+ @export_case(
+     example_inputs=(torch.ones(3, 2),),
+     tags={"torch.dynamic-shape", "python.data-structure", "python.assert"},
+ )
+ class ListContains(torch.nn.Module):
+     """
+     List containment relation can be checked on a dynamic shape or constants.
+     """
+     def __init__(self):
+         super().__init__()
+
+     def forward(self, x):
+         assert x.size(-1) in [6, 2]
+         assert x.size(0) not in [4, 5, 6]
+         assert "monkey" not in ["cow", "pig"]
+         return x + x
venv/lib/python3.10/site-packages/torch/_export/db/examples/null_context_manager.py ADDED
@@ -0,0 +1,26 @@
+ import contextlib
+
+ import torch
+
+ from torch._export.db.case import export_case
+
+
+ @export_case(
+     example_inputs=(torch.ones(3, 2),),
+     tags={"python.context-manager"},
+ )
+ class NullContextManager(torch.nn.Module):
+     """
+     Null context manager in Python will be traced out.
+     """
+
+     def __init__(self):
+         super().__init__()
+
+     def forward(self, x):
+         """
+         Null context manager in Python will be traced out.
+         """
+         ctx = contextlib.nullcontext()
+         with ctx:
+             return x.sin() + x.cos()
venv/lib/python3.10/site-packages/torch/_export/db/examples/optional_input.py ADDED
@@ -0,0 +1,19 @@
+ import torch
+
+ from torch._export.db.case import export_case, SupportLevel
+
+
+ @export_case(
+     example_inputs=(torch.randn(2, 3),),
+     tags={"python.object-model"},
+     support_level=SupportLevel.NOT_SUPPORTED_YET,
+ )
+ class OptionalInput(torch.nn.Module):
+     """
+     Tracing through optional input is not supported yet
+     """
+
+     def forward(self, x, y=torch.ones(2, 3)):
+         if y is not None:
+             return x + y
+         return x
venv/lib/python3.10/site-packages/torch/_export/db/examples/specialized_attribute.py ADDED
@@ -0,0 +1,29 @@
+ from enum import Enum
+
+ import torch
+
+ from torch._export.db.case import export_case
+
+
+ class Animal(Enum):
+     COW = "moo"
+
+
+ @export_case(
+     example_inputs=(torch.ones(3, 2),),
+ )
+ class SpecializedAttribute(torch.nn.Module):
+     """
+     Model attributes are specialized.
+     """
+
+     def __init__(self):
+         super().__init__()
+         self.a = "moo"
+         self.b = 4
+
+     def forward(self, x):
+         if self.a == Animal.COW.value:
+             return x * x + self.b
+         else:
+             raise ValueError("bad")
venv/lib/python3.10/site-packages/torch/_export/db/examples/tensor_setattr.py ADDED
@@ -0,0 +1,17 @@
+ import torch
+
+ from torch._export.db.case import export_case, SupportLevel
+
+
+ @export_case(
+     example_inputs=(torch.randn(3, 2), "attr"),
+     tags={"python.builtin"},
+     support_level=SupportLevel.SUPPORTED,
+ )
+ class TensorSetattr(torch.nn.Module):
+     """
+     setattr() call onto tensors is not supported.
+     """
+     def forward(self, x, attr):
+         setattr(x, attr, torch.randn(3, 2))
+         return x + 4
venv/lib/python3.10/site-packages/torch/_export/db/examples/type_reflection_method.py ADDED
@@ -0,0 +1,41 @@
+ import torch
+
+ from torch._export.db.case import export_case, SupportLevel, export_rewrite_case
+
+
+ class A:
+     @classmethod
+     def func(cls, x):
+         return 1 + x
+
+
+ @export_case(
+     example_inputs=(torch.ones(3, 4),),
+     tags={"python.builtin"},
+     support_level=SupportLevel.SUPPORTED,
+ )
+ class TypeReflectionMethod(torch.nn.Module):
+     """
+     type() calls on custom objects followed by attribute accesses are not allowed
+     due to its overly dynamic nature.
+     """
+
+     def __init__(self):
+         super().__init__()
+
+     def forward(self, x):
+         a = A()
+         return type(a).func(x)
+
+
+ @export_rewrite_case(parent=TypeReflectionMethod)
+ class TypeReflectionMethodRewrite(torch.nn.Module):
+     """
+     Custom object class methods will be inlined.
+     """
+
+     def __init__(self):
+         super().__init__()
+
+     def forward(self, x):
+         return A.func(x)
venv/lib/python3.10/site-packages/torch/_higher_order_ops/__init__.py ADDED
@@ -0,0 +1 @@
+ from .cond import cond
venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/auto_functionalize.cpython-310.pyc ADDED
Binary file (6.42 kB).
 
venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/cond.cpython-310.pyc ADDED
Binary file (10.6 kB).
 
venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/triton_kernel_wrap.cpython-310.pyc ADDED
Binary file (22.6 kB).
 
venv/lib/python3.10/site-packages/torch/_higher_order_ops/__pycache__/utils.cpython-310.pyc ADDED
Binary file (6.33 kB).
 
venv/lib/python3.10/site-packages/torch/_higher_order_ops/auto_functionalize.py ADDED
@@ -0,0 +1,261 @@
+ from typing import Any, Dict, List, Optional, Tuple, Union
+
+ import torch
+ import torch.utils._pytree as pytree
+ from torch import Tensor
+ from torch._C import DispatchKey
+ from torch._ops import HigherOrderOperator
+ from torch._prims_common import clone_preserve_strides
+ from torch._subclasses.fake_tensor import FakeTensorMode
+ from torch.fx.experimental.proxy_tensor import (
+     disable_proxy_modes_tracing,
+     ProxyTorchDispatchMode,
+     track_tensor_tree,
+ )
+
+
+ # NOTE: [auto-functionalizing custom ops]
+ # Users may wish to torch.compile custom ops that mutate their inputs.
+ # torch.compile will automatically support this op without anyone needing
+ # to provide a functionalization kernel for it. Here's how.
+ #
+ # Let's say we have a hypothetical mylib::sin_(Tensor(a!) x) -> ()
+ # op. First, when FakeTensor sees this op:
+ # - If the schema says it returns nothing, we can generate a trivial
+ #   FakeTensor rule for it (that returns nothing).
+ # - Otherwise, the user needs to provide a FakeTensor rule (abstract impl)
+ #
+ # Next, when Python FunctionalTensor sees the op, it will functionalize
+ # it by emitting a call to an auto_functionalize(op, ["x"], {"x": ...})
+ # HOP and replacing the mutated inputs with corresponding outputs of this HOP.
+ # This HOP effectively runs the functional version of the op when
+ # called: it clones inputs that will be mutated, runs the op, and
+ # then returns (output, Tensors with the new values)
+
+
+ class AutoFunctionalized(HigherOrderOperator):
+     """auto_functionalized(_mutable_op, **kwargs)
+
+     This HOP runs a "functional" version of _mutable_op.
+
+     Concretely, it looks at all the arguments that are mutable through
+     _mutable_op's operator schema, clones those kwargs, runs
+     `out = _mutable_op(**kwargs)` with the cloned values, and then returns the
+     operator output concatenated with the cloned values that were mutated.
+
+     We have some restrictions on `_mutable_op`.
+     See `can_auto_functionalize` for the restrictions. We can likely lift
+     many of these if users request it.
+
+     The reason why _mutable_op is prefixed with an
+     underscore is to prevent collisions with kwarg names in **kwargs.
+     """
+
+     def __init__(self):
+         super().__init__("auto_functionalized")
+
+     def __call__(
+         self,
+         _mutable_op: torch._ops.OpOverload,
+         **kwargs: Dict[str, Any],
+     ) -> Tuple[Any, Tuple[Tensor, ...]]:
+         assert can_auto_functionalize(_mutable_op)
+         assert isinstance(kwargs, dict)
+         return super().__call__(_mutable_op, **kwargs)
+
+
+ auto_functionalized = AutoFunctionalized()
+
+
+ def can_auto_functionalize(op: torch._ops.OperatorBase) -> bool:
+     if not isinstance(op, torch._ops.OpOverload):
+         return False
+
+     if torch._library.utils.is_builtin(op):
+         # We control the built-ins. These may (in rare cases)
+         # do input metadata mutation (which we have banned on custom ops)
+         return False
+     schema = op._schema
+     if not schema.is_mutable:
+         return False
+     schema = op._schema
+
+     for arg in schema.arguments:
+         if arg.alias_info is None:
+             continue
+         if not arg.alias_info.is_write:
+             continue
+         if type(arg.type) is torch.TensorType:
+             continue
+         if (
+             type(arg.type) is torch.OptionalType
+             and type(arg.type.getElementType()) is torch.TensorType
+         ):
+             continue
+         # Not yet supported: other Tensor types. This includes things like
+         # Tensor[], Tensor?[], Tensor[]?.
+         return False
+
+     # The returns must not alias anything
+     for ret in schema.returns:
+         if ret.alias_info is None and type(ret.type) is torch.TensorType:
+             continue
+         # Not yet supported: List[Tensor] return.
+         return False
+     return True
+
+
+ @auto_functionalized.py_impl(DispatchKey.CompositeExplicitAutograd)
+ def auto_functionalized_dense(
+     _mutable_op: torch._ops.OpOverload,
+     _only_clone_these_tensors: Optional[Tuple[str, ...]] = None,
+     **kwargs: Dict[str, Any],
+ ) -> Tuple[Any, Tuple[Tensor, ...]]:
+     new_kwargs = dict(**kwargs)
+     result = []
+
+     _mutable_args_names = get_mutable_arg_names(_mutable_op)
+     for name in _mutable_args_names:
+         if (
+             _only_clone_these_tensors is not None
+             and name not in _only_clone_these_tensors
+         ):
+             new_kwargs[name] = kwargs[name]
+         else:
+             new_kwargs[name] = (
+                 clone_preserve_strides(kwargs[name])
+                 if kwargs[name] is not None
+                 else None
+             )
+         result.append(new_kwargs[name])
+     out = _mutable_op(**new_kwargs)
+
+     if isinstance(out, tuple):
+         return (*out, *result)  # type: ignore[return-value]
+     else:
+         return (out, *result)  # type: ignore[return-value]
+
+
+ @auto_functionalized.py_impl(FakeTensorMode)
+ def auto_functionalized_fake(
+     mode,
+     _mutable_op: torch._ops.OpOverload,
+     **kwargs: Dict[str, Any],
+ ) -> Tuple[Any, Tuple[Tensor, ...]]:
+     with mode:
+         result = auto_functionalized_dense(_mutable_op, **kwargs)
+         return result
+
+
+ @auto_functionalized.py_impl(ProxyTorchDispatchMode)
+ def auto_functionalized_proxy(
+     mode,
+     _mutable_op: torch._ops.OpOverload,
+     **kwargs: Dict[str, Any],
+ ) -> Tuple[Any, Tuple[Tensor, ...]]:
+     if not mode.enable_tracing:
+         return auto_functionalized(_mutable_op, **kwargs)
+
+     with disable_proxy_modes_tracing():
+         out = auto_functionalized(_mutable_op, **kwargs)
+
+     proxy_kwargs = pytree.tree_map(mode.tracer.unwrap_proxy, kwargs)
+     out_proxy = mode.tracer.create_proxy(
+         "call_function",
+         auto_functionalized,
+         (_mutable_op,),
+         proxy_kwargs,
+     )
+     result = track_tensor_tree(out, out_proxy, constant=None, tracer=mode.tracer)
+     return result
+
+
+ auto_functionalized.fallthrough(DispatchKey.AutogradCPU)
+ auto_functionalized.fallthrough(DispatchKey.AutogradCUDA)
+
+
+ def get_mutable_arg_names(op: torch._ops.OpOverload) -> List[str]:
+     """
+     Returns the list of argument names that get mutated according to the
+     schema.
+     """
+     mutable_args_names = [
+         arg.name
+         for arg in op._schema.arguments
+         if arg.alias_info is not None and arg.alias_info.is_write
+     ]
+     return mutable_args_names
+
+
+ def do_auto_functionalize(
+     op: torch._ops.OpOverload, args: Tuple[Any, ...], kwargs: Dict[str, Any]
+ ) -> Any:
+     """Functionalizes a call to op(*args, **kwargs) by emitting a call to
+     `outs = auto_functionalized(op, normalized_kwargs)`
+     and replacing the mutated (args, kwargs) with the corresponding outputs.
+
+     The normalized_kwargs are just the (args, kwargs), but all in kwarg form.
+     This makes handling easier for the auto_functionalized HOP.
+     """
+     from torch._subclasses.functional_tensor import PythonFunctionalizeAPI
+
+     ctx = PythonFunctionalizeAPI()
+
+     # All of the (args, kwargs), but all as kwargs. The names for the
+     # args come from the schema. This makes it easier for us to work with them.
+     normalized_kwargs = {}
+     schema = op._schema
+     for idx, arg in enumerate(schema.arguments):
+         # NB: torch_dispatch kwargs are the args defined as kwarg-only in the schema
+         if arg.name in kwargs:
+             normalized_kwargs[arg.name] = kwargs[arg.name]
+         elif idx < len(args):
+             # if its out of bounds we don't need to do anything
+             # as it means the the optional arg was passed with its default
+             # value
+             normalized_kwargs[arg.name] = args[idx]
+         else:
+             normalized_kwargs[arg.name] = arg.default_value
+
+     unwrapped_kwargs = ctx.unwrap_tensors(normalized_kwargs)  # type: ignore[arg-type]
+     with ctx.redispatch_to_next():
+         unwrapped_outs = auto_functionalized(
+             op, **unwrapped_kwargs  # type: ignore[arg-type]
+         )
+
+     # List of the name of args that get mutated (according to the schema)
+     mutable_args_names = get_mutable_arg_names(op)
+
+     unwrapped_actual_out: Union[Any, Tuple[Any]] = unwrapped_outs[
+         : -len(mutable_args_names)
+     ]
+     unwrapped_mutable_out = unwrapped_outs[-len(mutable_args_names) :]
+
+     if len(op._schema.returns) == 0:
+         assert unwrapped_actual_out[0] is None
+         unwrapped_actual_out = None
+     elif len(op._schema.returns) == 1:
+         assert len(unwrapped_actual_out) == 1
+         unwrapped_actual_out = unwrapped_actual_out[0]
+     else:
+         assert len(unwrapped_actual_out) == len(op._schema.returns)
+
+     for name, unwrapped_out in zip(mutable_args_names, unwrapped_mutable_out):
+         # Can be None if input was `Tensor(a!)?`
+         if unwrapped_out is None:
+             continue
+         assert isinstance(unwrapped_out, torch.Tensor)
+         orig_arg = normalized_kwargs[name]
+         ctx.replace(orig_arg, unwrapped_out)
+         ctx.commit_update(orig_arg)
+         ctx.sync(orig_arg)
+
+     return ctx.wrap_tensors(unwrapped_actual_out)  # type: ignore[arg-type]
+
+
+ @auto_functionalized.py_functionalize_impl
+ def auto_functionalized_func(ctx, _mutable_op, **kwargs):
+     unwrapped_kwargs = ctx.unwrap_tensors(kwargs)
+     with ctx.redispatch_to_next():
+         result = auto_functionalized(_mutable_op, **unwrapped_kwargs)
+         return ctx.wrap_tensors(result)
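For context on the NOTE block in auto_functionalize.py above: a minimal sketch of the kind of mutable custom op it describes, assuming the torch.library define/impl API available in this torch build (the mylib namespace, the sin_ name, and f are illustrative only, not part of this commit). When f is compiled, functionalization can replace the in-place call with the auto_functionalized HOP defined above.

import torch

# Hypothetical op: the schema marks `x` as mutated (Tensor(a!)) and the op
# returns nothing, so the trivial FakeTensor rule applies and
# can_auto_functionalize() can accept it.
torch.library.define("mylib::sin_", "(Tensor(a!) x) -> ()")

@torch.library.impl("mylib::sin_", "CompositeExplicitAutograd")
def sin_(x):
    x.copy_(x.sin())  # mutate the input in place

@torch.compile(fullgraph=True)
def f(x):
    torch.ops.mylib.sin_(x)
    return x + 1

print(f(torch.randn(3)))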