applied-ai-018 committed
Commit bd5e920 · verified · 1 Parent(s): 049ff97

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50):
  1. ckpts/universal/global_step120/zero/20.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step120/zero/25.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step120/zero/25.post_attention_layernorm.weight/fp32.pt +3 -0
  4. venv/lib/python3.10/site-packages/torch/_dispatch/__init__.py +0 -0
  5. venv/lib/python3.10/site-packages/torch/_dispatch/__pycache__/__init__.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/torch/_dispatch/__pycache__/python.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/torch/_dispatch/python.py +178 -0
  8. venv/lib/python3.10/site-packages/torch/nested/_internal/__init__.py +0 -0
  9. venv/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/__init__.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/nested_tensor.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/ops.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/sdpa.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/torch/profiler/__init__.py +48 -0
  14. venv/lib/python3.10/site-packages/torch/profiler/__pycache__/__init__.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/torch/profiler/__pycache__/_pattern_matcher.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/torch/profiler/__pycache__/profiler.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/torch/profiler/_memory_profiler.py +1202 -0
  18. venv/lib/python3.10/site-packages/torch/profiler/_pattern_matcher.py +662 -0
  19. venv/lib/python3.10/site-packages/torch/profiler/_utils.py +373 -0
  20. venv/lib/python3.10/site-packages/torch/profiler/itt.py +78 -0
  21. venv/lib/python3.10/site-packages/torch/profiler/profiler.py +839 -0
  22. venv/lib/python3.10/site-packages/torch/profiler/python_tracer.py +20 -0
  23. venv/lib/python3.10/site-packages/torch/quantization/__init__.py +87 -0
  24. venv/lib/python3.10/site-packages/torch/quantization/__pycache__/__init__.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite_fx.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/torch/quantization/__pycache__/_quantized_conversions.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/torch/quantization/__pycache__/fake_quantize.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/torch/quantization/__pycache__/fuse_modules.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/torch/quantization/__pycache__/fuser_method_mappings.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/torch/quantization/__pycache__/observer.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/torch/quantization/__pycache__/qconfig.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/torch/quantization/__pycache__/quant_type.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/torch/quantization/__pycache__/quantization_mappings.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize_fx.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize_jit.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/torch/quantization/__pycache__/stubs.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/torch/quantization/__pycache__/utils.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/torch/quantization/_numeric_suite.py +28 -0
  41. venv/lib/python3.10/site-packages/torch/quantization/_numeric_suite_fx.py +26 -0
  42. venv/lib/python3.10/site-packages/torch/quantization/_quantized_conversions.py +132 -0
  43. venv/lib/python3.10/site-packages/torch/quantization/fake_quantize.py +32 -0
  44. venv/lib/python3.10/site-packages/torch/quantization/fuse_modules.py +22 -0
  45. venv/lib/python3.10/site-packages/torch/quantization/fuser_method_mappings.py +15 -0
  46. venv/lib/python3.10/site-packages/torch/quantization/fx/__init__.py +15 -0
  47. venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/__init__.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/_equalize.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/convert.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/fuse.cpython-310.pyc +0 -0
ckpts/universal/global_step120/zero/20.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:804bfc39c42c3e6b7b273b3e574c18f8266b71f5cc2b7abb8c54419ce359d7ef
+size 33555627
ckpts/universal/global_step120/zero/25.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aaf3920b1d3a46da3fea9d23caff1bda67796db5391c115c2742b5dbae1c7c79
+size 9387
ckpts/universal/global_step120/zero/25.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d62ada06ca7be555e98bd9b2d36730aff6115d6d9def590f39f56c0ff90c3580
+size 9293
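The three `.pt` shards above are Git LFS pointer files: only the `oid`/`size` metadata appears in the diff, while the actual tensors (Adam-style `exp_avg_sq` second-moment buffers and an fp32 master copy of a layer-norm weight from the ZeRO "universal" checkpoint) live in LFS storage. A minimal sketch of inspecting one shard locally, assuming the repository has been cloned and `git lfs pull` has replaced the pointers with the real data (the path simply mirrors the listing above):

```
import torch

# Hypothetical local path taken from the file listing above.
path = "ckpts/universal/global_step120/zero/25.post_attention_layernorm.weight/fp32.pt"

# map_location="cpu" lets the shard be inspected without a GPU.
state = torch.load(path, map_location="cpu")
print(type(state), getattr(state, "shape", None))
```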
venv/lib/python3.10/site-packages/torch/_dispatch/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/_dispatch/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (183 Bytes).
venv/lib/python3.10/site-packages/torch/_dispatch/__pycache__/python.cpython-310.pyc ADDED
Binary file (6.67 kB).
venv/lib/python3.10/site-packages/torch/_dispatch/python.py ADDED
@@ -0,0 +1,178 @@
+import itertools
+import unittest.mock
+from contextlib import contextmanager
+from typing import Iterator
+
+import torch
+import torch._C
+import torch._ops
+import torch.utils._python_dispatch
+import torch.utils._pytree as pytree
+
+__all__ = ["enable_python_dispatcher", "no_python_dispatcher", "enable_pre_dispatch"]
+
+no_python_dispatcher = torch._C._DisablePythonDispatcher
+enable_python_dispatcher = torch._C._EnablePythonDispatcher
+enable_pre_dispatch = torch._C._EnablePreDispatch
+
+CROSSREF_FUNCTIONALIZE = False
+
+
+def all_py_loaded_overloads() -> Iterator[torch._ops.OpOverload]:
+    """
+    Warning: the set of overloads this will report is very subtle. It is precisely
+    the set of torch.ops functions that have actually been accessed from Python
+    (e.g., we actually called torch.ops.aten.blah at some point. This is DIFFERENT
+    from the set of registered operators, which will in general be a larger set,
+    as this would include all operators which we ran C++ static initializers or
+    Python operator registration on. This does not eagerly populate the list on
+    torch.ops.aten; this list is lazy!
+
+    In other words, this is good for traversing over everything that has an
+    OpOverload object allocated in Python. We use it for cache invalidation, but
+    don't rely on this list being complete.
+
+    Note that even if we did report all C++ registered overloads, this isn't guaranteed
+    to be complete either, as a subsequent lazy load of a library which triggers more
+    registrations could add more things to the set.
+    """
+    for ns in torch.ops:
+        packets = getattr(torch.ops, ns)
+        for op_name in packets:
+            packet = getattr(packets, op_name)
+            for overload in packet:
+                yield getattr(packet, overload)
+
+
+@contextmanager
+def suspend_functionalization():
+    f_tls = torch._C._dispatch_tls_is_dispatch_key_included(
+        torch._C.DispatchKey.Functionalize
+    )
+    f_rv = torch._C._functionalization_reapply_views_tls()
+    if f_tls:
+        torch._disable_functionalization()
+    try:
+        yield
+    finally:
+        if f_tls:
+            torch._enable_functionalization(reapply_views=f_rv)
+
+
+def check_tensor_metadata_matches(nv, rv, desc):
+    assert callable(desc)
+    assert nv.size() == rv.size(), f"{desc()}: sizes {nv.size()} != {rv.size()}"
+    assert nv.dtype == rv.dtype, f"{desc()}: dtype {nv.dtype} != {rv.dtype}"
+    same_strides, idx = torch._prims_common.check_significant_strides(
+        nv, rv, only_cuda=False
+    )
+    assert (
+        same_strides
+    ), f"{desc()}: strides {nv.stride()} != {rv.stride()} (mismatch at index {idx})"
+
+
+def check_metadata_matches(n, r, desc):
+    assert callable(desc)
+    n_vals, n_spec = pytree.tree_flatten(n)
+    r_vals, r_spec = pytree.tree_flatten(r)
+    # TODO: test the specs match; empirically sometimes we have a tuple
+    # on one side and a list on the other
+    assert len(n_vals) == len(r_vals), f"{len(n_vals)} != {len(r_vals)}"
+    for i, nv, rv in zip(range(len(n_vals)), n_vals, r_vals):
+        if not isinstance(rv, torch.Tensor):
+            continue
+        check_tensor_metadata_matches(nv, rv, lambda: f"{desc()} output {i}")
+
+
+class Lit:
+    def __init__(self, s):
+        self.s = s
+
+    def __repr__(self):
+        return self.s
+
+
+def _fmt(a: object) -> object:
+    if isinstance(a, torch.Tensor):
+        return Lit(
+            f"torch.empty_strided({tuple(a.size())}, {a.stride()}, dtype={a.dtype})"
+        )
+    else:
+        return a
+
+
+def make_crossref_functionalize(op, final_key):
+    from torch._subclasses.fake_tensor import FakeTensorMode
+
+    # This case is pretty weird, suppress it for now
+    if op == torch.ops.aten.lift_fresh.default:
+        return final_key
+
+    def handler(*args, **kwargs):
+        fake_mode = FakeTensorMode()
+
+        def fakeify_defun(t):
+            if isinstance(t, torch.Tensor):
+                if torch._is_functional_tensor(t):
+                    r = torch._from_functional_tensor(t)
+                    # NB: This assumes that the inner tensor sizes/strides match
+                    # the outer tensor sizes/strides. This doesn't necessarily have to
+                    # be the case, see discussion at
+                    # https://github.com/pytorch/pytorch/pull/87610/files/401ddeda1d769bedc88a12de332c7357b60e51a4#r1007264456
+                    assert t.size() == r.size()
+                    assert t.stride() == r.stride()
+                else:
+                    r = t
+                # TODO: suppress guards
+                return fake_mode.from_tensor(r)
+            return t
+
+        def maybe_detach(t):
+            if isinstance(t, torch.Tensor):
+                return t.detach()
+            else:
+                return t
+
+        # TODO: This probably does the wrong thing if you're running other
+        # substantive modes with the normal op outside here
+        with torch.utils._python_dispatch._disable_current_modes(), suspend_functionalization():
+            f_args, f_kwargs = pytree.tree_map(fakeify_defun, (args, kwargs))
+            orig_f_args, orig_f_kwargs = pytree.tree_map(
+                maybe_detach, (f_args, f_kwargs)
+            )
+            with fake_mode:
+                f_r = op(*f_args, **f_kwargs)
+        r = op._op_dk(final_key, *args, **kwargs)
+
+        def desc():
+            fmt_args = ", ".join(
+                itertools.chain(
+                    (repr(pytree.tree_map(_fmt, a)) for a in orig_f_args),
+                    (
+                        f"{k}={pytree.tree_map(_fmt, v)}"
+                        for k, v in orig_f_kwargs.items()
+                    ),
+                )
+            )
+            return f"{op}({fmt_args})"
+
+        check_metadata_matches(f_r, r, desc)
+        return r
+
+    return handler
+
+
+# NB: enabling this is slow, don't do it in a hot loop. This is purely
+# for debugging purposes.
+@contextmanager
+def enable_crossref_functionalize():
+    for op in all_py_loaded_overloads():
+        op._uncache_dispatch(torch._C.DispatchKey.Functionalize)
+    try:
+        with enable_python_dispatcher(), unittest.mock.patch(
+            "torch._dispatch.python.CROSSREF_FUNCTIONALIZE", True
+        ):
+            yield
+    finally:
+        for op in all_py_loaded_overloads():
+            op._uncache_dispatch(torch._C.DispatchKey.Functionalize)
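As a rough, hypothetical sketch of how the debugging helper defined above might be exercised (not part of the diff; the function `f` and its input are made up, and as the module comment notes this mode is slow and intended purely for debugging):

```
import torch
from torch.func import functionalize
from torch._dispatch.python import enable_crossref_functionalize

def f(x):
    # In-place mutation that functionalization will rewrite.
    y = x.clone()
    y.add_(1)
    return y

x = torch.randn(4)
# While the context is active, ops dispatched through the Functionalize key
# are also re-run under FakeTensorMode and their output sizes/strides/dtypes
# are compared via check_metadata_matches.
with enable_crossref_functionalize():
    out = functionalize(f)(x)
```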
venv/lib/python3.10/site-packages/torch/nested/_internal/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (190 Bytes).
venv/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/nested_tensor.cpython-310.pyc ADDED
Binary file (10.9 kB).
venv/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/ops.cpython-310.pyc ADDED
Binary file (27.2 kB).
venv/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/sdpa.cpython-310.pyc ADDED
Binary file (11.8 kB).
venv/lib/python3.10/site-packages/torch/profiler/__init__.py ADDED
@@ -0,0 +1,48 @@
+r"""
+PyTorch Profiler is a tool that allows the collection of performance metrics during training and inference.
+Profiler's context manager API can be used to better understand what model operators are the most expensive,
+examine their input shapes and stack traces, study device kernel activity and visualize the execution trace.
+
+.. note::
+    An earlier version of the API in :mod:`torch.autograd` module is considered legacy and will be deprecated.
+
+"""
+import os
+
+from torch._C._autograd import _supported_activities, DeviceType, kineto_available
+from torch._C._profiler import _ExperimentalConfig, ProfilerActivity, RecordScope
+from torch.autograd.profiler import KinetoStepTracker, record_function
+from torch.optim.optimizer import register_optimizer_step_post_hook
+
+from .profiler import (
+    _KinetoProfile,
+    ExecutionTraceObserver,
+    profile,
+    ProfilerAction,
+    schedule,
+    supported_activities,
+    tensorboard_trace_handler,
+)
+
+__all__ = [
+    "profile",
+    "schedule",
+    "supported_activities",
+    "tensorboard_trace_handler",
+    "ProfilerAction",
+    "ProfilerActivity",
+    "kineto_available",
+    "DeviceType",
+    "record_function",
+    "ExecutionTraceObserver",
+]
+
+from . import itt
+
+
+def _optimizer_post_hook(optimizer, args, kwargs):
+    KinetoStepTracker.increment_step("Optimizer")
+
+
+if os.environ.get("KINETO_USE_DAEMON", None):
+    _ = register_optimizer_step_post_hook(_optimizer_post_hook)
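For context, a minimal sketch of the context-manager API that this module's docstring describes (the tiny model and input are stand-ins, not part of the diff):

```
import torch
from torch.profiler import profile, ProfilerActivity, record_function

model = torch.nn.Linear(128, 128)
inputs = torch.randn(32, 128)

# Collect CPU-side events (add ProfilerActivity.CUDA when profiling GPU kernels)
# and label a region of interest with record_function.
with profile(activities=[ProfilerActivity.CPU], record_shapes=True) as prof:
    with record_function("model_inference"):
        model(inputs)

print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10))
```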
venv/lib/python3.10/site-packages/torch/profiler/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.62 kB).
venv/lib/python3.10/site-packages/torch/profiler/__pycache__/_pattern_matcher.cpython-310.pyc ADDED
Binary file (23.4 kB).
venv/lib/python3.10/site-packages/torch/profiler/__pycache__/profiler.cpython-310.pyc ADDED
Binary file (29.8 kB).
venv/lib/python3.10/site-packages/torch/profiler/_memory_profiler.py ADDED
@@ -0,0 +1,1202 @@
1
+ import collections
2
+ import dataclasses
3
+ import enum
4
+ import itertools as it
5
+ import logging
6
+ from typing import (
7
+ Any,
8
+ cast,
9
+ DefaultDict,
10
+ Dict,
11
+ Iterator,
12
+ List,
13
+ Optional,
14
+ Set,
15
+ Tuple,
16
+ Union,
17
+ )
18
+
19
+ from typing_extensions import Literal
20
+
21
+ import torch
22
+ from torch._C import FunctionSchema
23
+ from torch._C._autograd import _ProfilerResult
24
+ from torch._C._profiler import (
25
+ _EventType,
26
+ _ExtraFields_Allocation,
27
+ _ExtraFields_TorchOp,
28
+ _ProfilerEvent,
29
+ _TensorMetadata,
30
+ RecordScope,
31
+ )
32
+ from torch._utils import _element_size
33
+ from torch.profiler import _utils
34
+
35
+ KeyAndID = Tuple["Key", int]
36
+ TensorAndID = Tuple["TensorKey", int]
37
+
38
+ log = logging.getLogger(__name__)
39
+
40
+
41
+ class Category(enum.Enum):
42
+ INPUT = enum.auto()
43
+ TEMPORARY = enum.auto()
44
+ ACTIVATION = enum.auto()
45
+ GRADIENT = enum.auto()
46
+ AUTOGRAD_DETAIL = enum.auto()
47
+ PARAMETER = enum.auto()
48
+ OPTIMIZER_STATE = enum.auto()
49
+
50
+
51
+ _CATEGORY_TO_COLORS = {
52
+ Category.PARAMETER: "darkgreen",
53
+ Category.OPTIMIZER_STATE: "goldenrod",
54
+ Category.INPUT: "black",
55
+ Category.TEMPORARY: "mediumpurple",
56
+ Category.ACTIVATION: "red",
57
+ Category.GRADIENT: "mediumblue",
58
+ Category.AUTOGRAD_DETAIL: "royalblue",
59
+ None: "grey",
60
+ }
61
+
62
+ _CATEGORY_TO_INDEX = {c: i for i, c in enumerate(_CATEGORY_TO_COLORS)}
63
+
64
+
65
+ class Action(enum.Enum):
66
+ PREEXISTING = enum.auto()
67
+ CREATE = enum.auto()
68
+ INCREMENT_VERSION = enum.auto()
69
+ DESTROY = enum.auto()
70
+
71
+
72
+ _ACTION_TO_INDEX = {i: i.value for i in Action}
73
+
74
+
75
+ @dataclasses.dataclass(eq=True, unsafe_hash=False, frozen=True)
76
+ class Key:
77
+ device: torch.device
78
+
79
+
80
+ @dataclasses.dataclass
81
+ class _Storage:
82
+ """Bundle storage pointer and id.
83
+
84
+ All profiling logic should use `allocation_id`, however it is useful to
85
+ print storage pointers for debugging and unit tests sometimes look up
86
+ values using the storage data pointer of a live Tensor."""
87
+
88
+ ptr: int
89
+ allocation_id: int
90
+
91
+ def __repr__(self) -> str:
92
+ return f"{hex(self.ptr):>18} ({self.allocation_id})"
93
+
94
+ def __eq__(self, other: object) -> bool:
95
+ return isinstance(other, _Storage) and self.allocation_id == other.allocation_id
96
+
97
+ def __hash__(self) -> int:
98
+ return hash(self.allocation_id)
99
+
100
+
101
+ @dataclasses.dataclass(eq=True, unsafe_hash=True, frozen=True)
102
+ class TensorKey(Key):
103
+ """Hashable identifier for a storage which has been asigned an ID.
104
+
105
+ A detailed description of Tensor IDs and why they are needed is given in
106
+ `torch/csrc/profiler/collection.h` when `TensorID` is declared. To
107
+ summarize, multiple Storage buffers can map to the same logical Tensor.
108
+ This dataclass is used to refer to a concrete in-memory StorageImpl of
109
+ a Tensor.
110
+ """
111
+
112
+ id: int
113
+ storage: _Storage
114
+
115
+ def __repr__(self) -> str:
116
+ return f"id={self.id}: {repr(self.storage):<24} ({self.device})"
117
+
118
+ def __lt__(self, other: "TensorKey") -> bool:
119
+ return self._as_sortable < other._as_sortable
120
+
121
+ @staticmethod
122
+ def _make(
123
+ tensor_id: Optional[int],
124
+ storage_ptr: Optional[int],
125
+ allocation_id: Optional[int],
126
+ device: torch.device,
127
+ ) -> Optional["TensorKey"]:
128
+ if (
129
+ tensor_id is not None
130
+ and storage_ptr is not None
131
+ and allocation_id is not None
132
+ ):
133
+ return TensorKey(device, tensor_id, _Storage(storage_ptr, allocation_id))
134
+ return None
135
+
136
+ @classmethod
137
+ def from_allocation(cls, alloc: _ExtraFields_Allocation) -> Optional["TensorKey"]:
138
+ return cls._make(alloc.id, alloc.ptr, alloc.allocation_id, alloc.device)
139
+
140
+ @classmethod
141
+ def from_tensor(cls, t: Optional[_TensorMetadata]) -> Optional["TensorKey"]:
142
+ if t is not None:
143
+ return cls._make(t.id, t.storage_data_ptr, t.allocation_id, t.device)
144
+ return None
145
+
146
+ @property
147
+ def _as_sortable(self) -> Tuple[int, int, str, int]:
148
+ return self.id, self.storage.allocation_id, self.device.type, self.device.index
149
+
150
+
151
+ def _extract_parameters_and_gradients(
152
+ node: _ProfilerEvent,
153
+ ) -> Iterator[Tuple[Optional[TensorKey], Optional[TensorKey]]]:
154
+ children = node.children
155
+
156
+ # AccumulateGrad is used in the Autograd engine to handle gradient updates.
157
+ # There are two possible cases:
158
+ # 1) This is a newly created gradient Tensor. In that case there is nothing
159
+ # to accumulate, so autograd simply detaches the Tensor.
160
+ #
161
+ # 2) There is a preexisting gradient Tensor and we need to add the newly
162
+ # computed update. This is done with an in-place add (aten::add_) op.
163
+ # (The underscore suffix denotes "in-place".)
164
+ if (
165
+ node.typed[0] == _EventType.TorchOp
166
+ and node.typed[1].scope == RecordScope.BACKWARD_FUNCTION
167
+ # TODO(robieta): Move away from load bearing names
168
+ and node.name == "torch::autograd::AccumulateGrad"
169
+ and children
170
+ and children[0].typed[0] == _EventType.TorchOp
171
+ and children[0].name in ("aten::detach", "aten::add_")
172
+ and children[0].typed[1].inputs
173
+ and isinstance(children[0].typed[1].inputs[0], _TensorMetadata)
174
+ ):
175
+ yield None, TensorKey.from_tensor(children[0].typed[1].inputs[0])
176
+
177
+ # We directly instrument `torch.nn.Module` and `torch.optim.Optimizer`
178
+ # NOTE: The values captured by the python tracer are cached; they can be
179
+ # used to build up labels but do not imply that a Tensor was live at
180
+ # a particular time.
181
+ elif node.typed[0] == _EventType.PyCall:
182
+ typed_fields = node.typed[1]
183
+ assert typed_fields.module is None or typed_fields.optimizer is None
184
+ if typed_fields.module is not None:
185
+ for _, p, p_grad in typed_fields.module.parameters:
186
+ yield TensorKey.from_tensor(p), TensorKey.from_tensor(p_grad)
187
+
188
+ if typed_fields.optimizer is not None:
189
+ for p, p_grad, _ in typed_fields.optimizer.parameters:
190
+ yield TensorKey.from_tensor(p), TensorKey.from_tensor(p_grad)
191
+
192
+
193
+ def extract_parameters(node: _ProfilerEvent) -> Iterator[TensorKey]:
194
+ for p, p_grad in _extract_parameters_and_gradients(node):
195
+ if p is not None:
196
+ yield p
197
+
198
+
199
+ def extract_gradients(
200
+ node: _ProfilerEvent,
201
+ ) -> Iterator[Tuple[Optional[TensorKey], TensorKey]]:
202
+ for p, p_grad in _extract_parameters_and_gradients(node):
203
+ if p_grad is not None:
204
+ yield p, p_grad
205
+
206
+
207
+ def get_scopes(event: Optional[_ProfilerEvent]) -> Tuple[RecordScope, ...]:
208
+ scopes = []
209
+ while event:
210
+ if event.typed[0] == _EventType.TorchOp:
211
+ scopes.append(event.typed[1].scope)
212
+ event = event.parent
213
+ return tuple(scopes)
214
+
215
+
216
+ class SchemaMatcher:
217
+ """Lookup operator schema based on profiled name.
218
+
219
+ When profiling we record the operator's name but not the schema. However
220
+ some analysis requires that information. Fortunately we can look up
221
+ registered schema from the recorded name. We do not, however, record the
222
+ overload and so we must compare the profiled arguments with all overloads
223
+ to determine viable matches.
224
+
225
+ Note: Once https://github.com/pytorch/pytorch/issues/78871 is completed
226
+ this code will be obsolete.
227
+ """
228
+
229
+ @classmethod
230
+ def inputs_are_mutable(cls, t: _ExtraFields_TorchOp) -> Tuple[Optional[bool], ...]:
231
+ """Determine which inputs may have mutated based on function schema.
232
+
233
+ Note that we don't need to resolve down to a single schema to perform
234
+ this analysis. An input is mutable if it is mutable in any overload. In
235
+ practice, however, it is overwhelmingly common to match a single
236
+ overload. If we cannot find any valid schema then we must be
237
+ conservative and assume all inputs are mutable.
238
+ """
239
+ mutable: Optional[List[bool]] = None
240
+ for schema in cls.match_schemas(t):
241
+ mutable = mutable or [False for _ in schema.arguments]
242
+ for i, arg in enumerate(schema.arguments):
243
+ mutable[i] |= getattr(arg.alias_info, "is_write", False)
244
+
245
+ return tuple(mutable or (None for _ in t.inputs))
246
+
247
+ @classmethod
248
+ def match_schemas(cls, t: _ExtraFields_TorchOp) -> Tuple[FunctionSchema, ...]:
249
+ signature = tuple(
250
+ # Tensor
251
+ TensorKey.from_tensor(i) if isinstance(i, _TensorMetadata)
252
+ #
253
+ # TensorList
254
+ else [TensorKey.from_tensor(j) for j in i] if isinstance(i, list)
255
+ #
256
+ # Scalar and uncaptured inputs.
257
+ else i
258
+ for i in t.inputs
259
+ )
260
+
261
+ def matches(schema) -> bool:
262
+ return len(schema.arguments) == len(signature) and all(
263
+ cls._types_match(observed, schema_arg.type)
264
+ for observed, schema_arg in zip(signature, schema.arguments)
265
+ )
266
+
267
+ return tuple(s for s in cls.lookup_schemas(t.name) or () if matches(s))
268
+
269
+ @classmethod
270
+ def _types_match(cls, observed, schema_type) -> bool:
271
+ if isinstance(schema_type, torch._C.OptionalType):
272
+ schema_type = schema_type.getElementType()
273
+ return observed is None or cls._types_match(observed, schema_type)
274
+
275
+ if isinstance(schema_type, torch._C.AnyType):
276
+ return True
277
+
278
+ if schema_type.isSubtypeOf(torch._C.ListType.ofTensors()):
279
+ return isinstance(observed, list) and all(
280
+ isinstance(i, TensorKey) for i in observed
281
+ )
282
+
283
+ type_map: Tuple[Tuple[Any, Union[type, Tuple[type, ...]]], ...] = (
284
+ (torch._C.TensorType, TensorKey),
285
+ (torch._C.NoneType, type(None)),
286
+ (torch._C.BoolType, bool),
287
+ (torch._C.IntType, int),
288
+ (torch._C.FloatType, float),
289
+ (torch._C.ComplexType, complex),
290
+ (torch._C.NumberType, (bool, int, float, complex)),
291
+ )
292
+
293
+ for jit_type, py_types in type_map:
294
+ if isinstance(schema_type, jit_type):
295
+ return isinstance(observed, py_types)
296
+
297
+ # Profiler only records a subset of possible argument types. If we
298
+ # reach this point then the schema must call for a type that profiler
299
+ # does not record. Thus, the schema can only be a match if `observed`
300
+ # is also None.
301
+ return observed is None
302
+
303
+ @staticmethod
304
+ def lookup_schemas(name: str) -> Optional[Tuple[FunctionSchema, ...]]:
305
+ # TODO(robieta):
306
+ # _jit_get_schemas_for_operator is quite expensive. (~100us / call)
307
+ # Consider adding `functools.lru_cache` if that becomes an issue.
308
+
309
+ try:
310
+ # Schema lookup will throw if `name` is malformed. (For example,
311
+ # schemas must be namespaced and schema lookup will fail if name
312
+ # does not include "::".) We simply catch the exception and return
313
+ # `None` to denote that `name` cannot be an operator name.
314
+ #
315
+ # Note that record_function annotations also go through this path,
316
+ # so it is expected that some names will not correspond to PyTorch
317
+ # operators.
318
+ if "::" not in name:
319
+ return None
320
+ return tuple(torch._C._jit_get_schemas_for_operator(name))
321
+ except RuntimeError:
322
+ return None
323
+
324
+
325
+ class OpTree:
326
+ def __init__(self, result: _ProfilerResult) -> None:
327
+ self._root_nodes = result.experimental_event_tree()
328
+ self._sorted_nodes = tuple(sorted(self.dfs(), key=lambda x: x.start_time_ns))
329
+
330
+ def dfs(self, *args, **kwargs) -> Iterator[_ProfilerEvent]:
331
+ yield from _utils.traverse_dfs(self._root_nodes, *args, **kwargs)
332
+
333
+ @property
334
+ def sorted_nodes(self) -> Tuple[_ProfilerEvent, ...]:
335
+ return self._sorted_nodes
336
+
337
+
338
+ class SizeMap:
339
+ def __init__(self, op_tree: OpTree) -> None:
340
+ self._values: Dict[TensorKey, int] = {}
341
+
342
+ for node in op_tree.sorted_nodes:
343
+ if node.typed[0] == _EventType.TorchOp:
344
+ for t in self._flat_tensor_inputs(node.typed[1]):
345
+ self._update_values(t)
346
+
347
+ elif node.typed[0] == _EventType.PyCall:
348
+ typed_fields = node.typed[1]
349
+ assert typed_fields.module is None or typed_fields.optimizer is None
350
+ if typed_fields.module is not None:
351
+ for _, p, p_grad in typed_fields.module.parameters:
352
+ self._update_values(p)
353
+ self._update_values(p_grad)
354
+
355
+ if typed_fields.optimizer is not None:
356
+ for p, p_grad, state in typed_fields.optimizer.parameters:
357
+ self._update_values(p)
358
+ self._update_values(p_grad)
359
+ for _, t in state:
360
+ self._update_values(t)
361
+
362
+ allocations: Dict[TensorKey, int] = {}
363
+ for node in op_tree.sorted_nodes:
364
+ if node.typed[0] == _EventType.Allocation:
365
+ alloc_fields = node.typed[1]
366
+ key = TensorKey.from_allocation(alloc_fields)
367
+ if key:
368
+ new_size = abs(alloc_fields.alloc_size)
369
+ prior_size = allocations.setdefault(key, new_size)
370
+
371
+ # It is possible to resize Storage in PyTorch, however we
372
+ # key on data pointer so most resizes will be treated as a
373
+ # change in storage. The one corner case that cannot be
374
+ # handled is `realloc` which successfully resizes the
375
+ # storage. At time of writing this is not done anywhere in
376
+ # the core PyTorch codebase.
377
+ if prior_size != new_size:
378
+ delta = f"{prior_size} vs. {new_size}"
379
+ log.warning("Mismatch between allocation and free: %s", delta)
380
+
381
+ self._values.update(allocations)
382
+
383
+ def _update_values(self, t: Optional[_TensorMetadata]) -> None:
384
+ key = TensorKey.from_tensor(t)
385
+ if key is not None and t is not None and t.layout == torch.strided:
386
+ # Scalars are represented as zero dim Tensors
387
+ n = max(i[0] * i[1] for i in zip(t.sizes or [1], t.strides or [1]))
388
+
389
+ num_bytes = n * _element_size(t.dtype)
390
+ assert num_bytes >= 0, f"{num_bytes}"
391
+ self._values[key] = max(self._values.get(key, 0), num_bytes)
392
+
393
+ @staticmethod
394
+ def _flat_tensor_inputs(op: _ExtraFields_TorchOp) -> Iterator[_TensorMetadata]:
395
+ for i in op.inputs:
396
+ if isinstance(i, _TensorMetadata):
397
+ yield i
398
+ elif isinstance(i, list):
399
+ yield from i
400
+
401
+ def __getitem__(self, key: TensorKey):
402
+ return self._values[key]
403
+
404
+
405
+ @dataclasses.dataclass()
406
+ class DataFlowEdge:
407
+ input_version: Optional[int] = None
408
+ mutated: Optional[bool] = False
409
+
410
+ @property
411
+ def is_allocation(self) -> bool:
412
+ return self.input_version is None
413
+
414
+ @property
415
+ def is_deletion(self) -> bool:
416
+ return self.mutated is None
417
+
418
+
419
+ class DataFlowNode:
420
+ def __init__(self, event: _ProfilerEvent, graph: "DataFlowGraph") -> None:
421
+ self._event = event
422
+ self._graph = graph
423
+ self._edges: Dict[TensorKey, DataFlowEdge] = self._determine_edges()
424
+
425
+ for key, edge in self._edges.items():
426
+ if edge.mutated and not edge.is_allocation:
427
+ self._graph.bump(key)
428
+
429
+ # Make sure the version bumping behavior matches what we expect.
430
+ versions = {k: (v, self._graph.lookup(k)) for k, v in self.outputs.items()}
431
+ assert all(i == j for i, j in versions.values()), f"{versions}, {self._edges}"
432
+
433
+ def _determine_edges(self) -> Dict[TensorKey, DataFlowEdge]:
434
+ subtree = tuple(_utils.traverse_dfs([self._event]))
435
+
436
+ # Start by populating edges from op inputs and outputs.
437
+ mutable_by_key: Dict[Optional[TensorKey], Set[Optional[bool]]] = {}
438
+ for op in (i.typed[1] for i in subtree if i.typed[0] == _EventType.TorchOp):
439
+ for op_input, mutable in zip(
440
+ op.inputs, SchemaMatcher.inputs_are_mutable(op)
441
+ ):
442
+ # Tensor
443
+ if isinstance(op_input, _TensorMetadata):
444
+ key = TensorKey.from_tensor(op_input)
445
+ mutable_by_key.setdefault(key, set()).add(mutable)
446
+
447
+ # TensorList
448
+ elif isinstance(op_input, list):
449
+ for op_input_i in op_input:
450
+ key = TensorKey.from_tensor(op_input_i)
451
+ mutable_by_key.setdefault(key, set()).add(mutable)
452
+
453
+ edges: DefaultDict[Optional[TensorKey], DataFlowEdge]
454
+ edges = collections.defaultdict(DataFlowEdge)
455
+ for key, mutable_set in mutable_by_key.items():
456
+ if key is not None:
457
+ edges[key].input_version = self._graph.lookup(key) if key else -1
458
+
459
+ # We consider an op to be mutated if we encounter a schema where it
460
+ # is a mutable argument OR if it is ambiguous. (We never explicitly
461
+ # see it in any schema.)
462
+ mutated = (True in mutable_set) or (tuple(mutable_set) == (None,))
463
+ edges[key].mutated = mutated
464
+
465
+ # Then handle deletions. Note that deleting a Tensor implicitly adds
466
+ # it as an input edge.
467
+ for i in subtree:
468
+ if i.typed[0] == _EventType.Allocation and i.typed[1].alloc_size < 0:
469
+ key = TensorKey.from_allocation(i.typed[1])
470
+ edge = edges[key]
471
+ assert key is None or edge.mutated is not None, f"Double delete: {key}"
472
+ edge.mutated = None
473
+ edge.input_version = self._graph.lookup(key) if key else -1
474
+
475
+ # And finally handle allocations. This step must be last, because the
476
+ # previous two steps optimistically add input edges.
477
+ for i in subtree:
478
+ if i.typed[0] == _EventType.Allocation and i.typed[1].alloc_size > 0:
479
+ edges[TensorKey.from_allocation(i.typed[1])].input_version = None
480
+
481
+ # We don't need to sort the inputs, but it makes debugging and unit tests nicer.
482
+ return dict(sorted((k, v) for k, v in edges.items() if k is not None))
483
+
484
+ @property
485
+ def inputs(self) -> Dict[TensorKey, Tuple[bool, int]]:
486
+ return {
487
+ # MyPy can't see through `is_allocation` to know that
488
+ # `v.input_version` is not None.
489
+ k: (bool(v.mutated), cast(int, v.input_version))
490
+ for k, v in self._edges.items()
491
+ if not v.is_allocation
492
+ }
493
+
494
+ @property
495
+ def outputs(self) -> Dict[TensorKey, int]:
496
+ return {
497
+ k: 0 if v.input_version is None else v.input_version + 1
498
+ for k, v in self._edges.items()
499
+ if (v.is_allocation and not v.is_deletion) or v.mutated
500
+ }
501
+
502
+ @property
503
+ def intermediates(self) -> Tuple[TensorKey, ...]:
504
+ return tuple(
505
+ k for k, v in self._edges.items() if v.is_allocation and v.is_deletion
506
+ )
507
+
508
+ @property
509
+ def start_time(self) -> int:
510
+ return self._event.start_time_ns
511
+
512
+
513
+ class DataFlowGraph:
514
+ def __init__(self, op_tree: OpTree) -> None:
515
+ self._op_tree = op_tree
516
+ self._leaf_events = self._extract_leaf_events(op_tree)
517
+ self._active_version: Dict[TensorKey, Optional[int]] = {}
518
+ self._flow_nodes = [DataFlowNode(e, self) for e in self.leaf_events]
519
+ self._flow_nodes.sort(key=lambda x: x.start_time)
520
+ self.validate()
521
+
522
+ @property
523
+ def flow_nodes(self) -> Tuple[DataFlowNode, ...]:
524
+ return tuple(self._flow_nodes)
525
+
526
+ def validate(self):
527
+ # Check that each (Tensor, version) pair has a unique creation node
528
+ outputs: Set[Tuple[TensorKey, int]] = set()
529
+ for node in self.flow_nodes:
530
+ node_outputs = set(node.outputs.items())
531
+ duplicates = outputs & node_outputs
532
+ assert not duplicates, f"{node._event.name} {node._edges} {duplicates}"
533
+ outputs |= node_outputs
534
+
535
+ # And check that `self._nodes` forms a valid topologically sorted DAG.
536
+ tensor_versions: Dict[TensorKey, int] = {}
537
+ for node in self.flow_nodes:
538
+ for key, (_, version) in node.inputs.items():
539
+ expected = tensor_versions.get(key, 0)
540
+ assert expected == version, (expected, version)
541
+
542
+ for key, version in node.outputs.items():
543
+ prior_version = tensor_versions.get(key, version)
544
+ assert version >= prior_version, (version, prior_version)
545
+ tensor_versions[key] = version
546
+
547
+ @property
548
+ def leaf_events(self) -> Tuple[_ProfilerEvent, ...]:
549
+ return self._leaf_events
550
+
551
+ @staticmethod
552
+ def _extract_leaf_events(op_tree: OpTree) -> Tuple[_ProfilerEvent, ...]:
553
+ """Partially traverse the op tree and extract top level ops.
554
+
555
+ Consider the following code:
556
+ ```
557
+ with record_function("My annotation"):
558
+ x.zero_()
559
+ y.zero_()
560
+ ```
561
+
562
+ The op tree (assuming no Autograd) will look like:
563
+ <Python context>
564
+ TorchOp: "My annotation"
565
+ TorchOp: zero_
566
+ TorchOp: fill_
567
+ TorchOp: zero_
568
+ TorchOp: fill_
569
+
570
+ The recursive structure of operator calls makes data flow unwieldy.
571
+ In order to simplify analysis we would like to select the highest level
572
+ ops to represent in the graph. In this case those are the `zero_` ops;
573
+ the fact that `fill_` is called is an implementation detail. We also
574
+ do not want to group everything under "My annotation" as this could
575
+ create overly coarse bundles and lose critical semantics.
576
+
577
+ To address this issue we walk over the graph and select the topmost
578
+ torch ops ** which match at least one operator schema **. These form
579
+ the leaves of the first pass through the op tree. (As well as any
580
+ allocations or frees which do are not part of a kernel.) These events
581
+ form the logical nodes in our data flow graph.
582
+ """
583
+
584
+ leaf_events: List[_ProfilerEvent] = []
585
+
586
+ def leaf_op(e: _ProfilerEvent) -> bool:
587
+ return e.typed[0] == _EventType.TorchOp and (
588
+ e.typed[1].scope == RecordScope.BACKWARD_FUNCTION
589
+ or bool(SchemaMatcher.match_schemas(e.typed[1]))
590
+ )
591
+
592
+ def children_fn(e: _ProfilerEvent):
593
+ if leaf_op(e) or e.tag == _EventType.Allocation:
594
+ leaf_events.append(e)
595
+ return []
596
+
597
+ return e.children
598
+
599
+ for _ in op_tree.dfs(children_fn=children_fn):
600
+ pass
601
+
602
+ return tuple(sorted(leaf_events, key=lambda x: x.start_time_ns))
603
+
604
+ def lookup(self, key: TensorKey) -> int:
605
+ version = self._active_version.setdefault(key, 0)
606
+ assert version is not None
607
+ return version
608
+
609
+ def bump(self, key: TensorKey) -> None:
610
+ prior_version = self._active_version.get(key, None)
611
+ assert prior_version is not None
612
+ self._active_version[key] = prior_version + 1
613
+
614
+ def delete(self, key: TensorKey) -> None:
615
+ assert self._active_version.setdefault(key, 0) is not None
616
+ self._active_version[key] = None
617
+
618
+
619
+ @dataclasses.dataclass
620
+ class CategoryElement:
621
+ by_id: Optional[Category] = None
622
+ by_key: Dict[TensorKey, Category] = dataclasses.field(default_factory=dict)
623
+ by_version: Dict[TensorAndID, Category] = dataclasses.field(default_factory=dict)
624
+
625
+ # Used by unit tests to check internals. (And consequently by
626
+ # MemoryProfile.lookup) This should not be used in any other capacity.
627
+ _by_id_keyset: Set[TensorKey] = dataclasses.field(default_factory=set)
628
+
629
+
630
+ @dataclasses.dataclass
631
+ class CategoryDict:
632
+ _values: DefaultDict[int, CategoryElement] = dataclasses.field(
633
+ default_factory=lambda: collections.defaultdict(CategoryElement)
634
+ )
635
+
636
+ def set_by_id(self, key: TensorKey, category: Category) -> None:
637
+ self._values[key.id].by_id = category
638
+ self._values[key.id]._by_id_keyset.add(key)
639
+
640
+ def set_by_key(self, key: TensorKey, category: Category) -> None:
641
+ self._values[key.id].by_key[key] = category
642
+
643
+ def set_by_version(self, key: TensorKey, version: int, category: Category) -> None:
644
+ self._values[key.id].by_version[(key, version)] = category
645
+
646
+ def setdefault_by_version(
647
+ self, key: TensorKey, version: int, category: Category
648
+ ) -> None:
649
+ self._values[key.id].by_version.setdefault((key, version), category)
650
+
651
+ def get(self, key: Key, version: int) -> Optional[Category]:
652
+ if isinstance(key, Key) and not isinstance(key, TensorKey):
653
+ return None
654
+ element = self._values[key.id]
655
+ return (
656
+ element.by_id
657
+ or element.by_key.get(key, None)
658
+ or element.by_version.get((key, version), None)
659
+ )
660
+
661
+
662
+ class MemoryProfile:
663
+ def __init__(self, result: _ProfilerResult) -> None:
664
+ self._op_tree = OpTree(result)
665
+ self._data_flow_graph = DataFlowGraph(self._op_tree)
666
+ self._size_map = SizeMap(self._op_tree)
667
+ self._categories = CategoryDict()
668
+
669
+ self._set_gradients_and_temporaries()
670
+ self._set_parameters_using_python_tracer()
671
+ self._set_inputs()
672
+ self._set_parameters_using_data_flow()
673
+ self._set_activations()
674
+ self._set_optimizer_state()
675
+ self._set_autograd_detail()
676
+
677
+ @property
678
+ def timeline(self) -> Tuple[Tuple[int, Action, KeyAndID, int], ...]:
679
+ output: List[Tuple[int, Action, KeyAndID, int]] = []
680
+ allocation_times: Dict[Tuple[TensorKey, bool], int] = {}
681
+ live_unknown: Dict[Tuple[int, torch.device], Literal[True]] = {}
682
+ for event in self._op_tree.dfs():
683
+ if event.typed[0] == _EventType.Allocation:
684
+ alloc_fields = event.typed[1]
685
+ alloc_size = alloc_fields.alloc_size
686
+ is_allocation = alloc_size > 0
687
+ t = event.start_time_ns
688
+
689
+ tkey = TensorKey.from_allocation(alloc_fields)
690
+ if tkey is not None:
691
+ allocation_times[(tkey, is_allocation)] = t
692
+
693
+ else:
694
+ key = Key(alloc_fields.device)
695
+ ptr_and_device = (alloc_fields.ptr, key.device)
696
+ if is_allocation:
697
+ if ptr_and_device in live_unknown:
698
+ output.append(
699
+ (t, Action.INCREMENT_VERSION, (key, 0), alloc_size)
700
+ )
701
+ else:
702
+ live_unknown[ptr_and_device] = True
703
+ output.append((t, Action.CREATE, (key, 0), alloc_size))
704
+ else:
705
+ output.append((t, Action.DESTROY, (key, 0), -alloc_size))
706
+ if not live_unknown.pop(ptr_and_device, False):
707
+ output.append(
708
+ (-1, Action.PREEXISTING, (key, 0), -alloc_size)
709
+ )
710
+
711
+ snapshot = self._category_snapshot()
712
+ last_version = dict(sorted(snapshot.keys()))
713
+
714
+ events: List[Tuple[int, Action, TensorAndID]] = [
715
+ (-1, Action.PREEXISTING, (key, version))
716
+ for key, version in snapshot.keys()
717
+ if (key, True) not in allocation_times and version == 0
718
+ ]
719
+
720
+ for node in self._data_flow_graph.flow_nodes:
721
+ for key, edge in node._edges.items():
722
+ if edge.is_allocation:
723
+ t = allocation_times[(key, True)]
724
+ events.append((t, Action.CREATE, (key, 0)))
725
+
726
+ elif edge.mutated:
727
+ t = node._event.start_time_ns
728
+ version = edge.input_version
729
+ assert version is not None
730
+ events.append((t, Action.INCREMENT_VERSION, (key, version)))
731
+
732
+ if edge.is_deletion:
733
+ t = allocation_times[(key, False)]
734
+ events.append((t, Action.DESTROY, (key, last_version[key])))
735
+
736
+ output.extend(
737
+ (time, action, (key, version), self._size_map[key])
738
+ for time, action, (key, version) in events
739
+ )
740
+
741
+ output.sort(key=lambda x: (x[0], x[1].value))
742
+ return tuple(output)
743
+
744
+ def _is_gradient(self, *args, **kwargs) -> bool:
745
+ return self._categories.get(*args, **kwargs) == Category.GRADIENT
746
+
747
+ def _category_snapshot(self) -> Dict[TensorAndID, Optional[Category]]:
748
+ all_tensor_versions: Set[TensorAndID] = set()
749
+
750
+ for node in self._data_flow_graph.flow_nodes:
751
+ all_tensor_versions.update(((k, v) for k, (_, v) in node.inputs.items()))
752
+ all_tensor_versions.update((key, 0) for key in node.intermediates)
753
+ all_tensor_versions.update(node.outputs.items())
754
+
755
+ for i in self._categories._values.values():
756
+ all_tensor_versions.update((key, 0) for key in i._by_id_keyset)
757
+
758
+ return {
759
+ (key, version): self._categories.get(key, version)
760
+ for key, version in sorted(all_tensor_versions)
761
+ }
762
+
763
+ def _any_version_depends_on_gradient(self) -> Set[int]:
764
+ """Extract IDs of Tensors which depend or will depend on a gradient.
765
+
766
+ Note that this weakened definition of "depends" requires us to loop
767
+ over the data flow graph multiple times because it allows dependency
768
+ information to flow backward through edges and removes the guarantee
769
+ that nodes are topologically sorted. (Or indeed, even that a valid
770
+ topological order exists.) Put another way, we have converted an
771
+ acyclic data flow graph into a cyclic graph and we are attempting to
772
+ partition cycles involving a gradient from the rest of the graph.
773
+ """
774
+ depends_on_gradient: Set[int] = set()
775
+ while True:
776
+ start_size = len(depends_on_gradient)
777
+ for node in self._data_flow_graph.flow_nodes:
778
+ ids = tuple(
779
+ key.id
780
+ for key, (_, version) in node.inputs.items()
781
+ if self._categories.get(key, version)
782
+ in (Category.GRADIENT, Category.PARAMETER)
783
+ or key.id in depends_on_gradient
784
+ )
785
+
786
+ if ids:
787
+ depends_on_gradient.update(ids)
788
+ depends_on_gradient.update(key.id for key in node.outputs)
789
+
790
+ # We are guaranteed to exit because there is a finite set of
791
+ # TensorAndID pairs. In practice we do not expect to loop more than
792
+ # three times: once to identify the core parameter update loop,
793
+ # once to fold the first step into that loop, and a third time
794
+ # where no new elements are added.
795
+ if len(depends_on_gradient) == start_size:
796
+ return depends_on_gradient
797
+
798
+ def _set_gradients_and_temporaries(self) -> None:
799
+ """Mark Tensors which are unambiguous and simple to reason about."""
800
+
801
+ # Gradients are straightforward to detect. We directly check the
802
+ # `.grad` property in the Python tracer, and we can detect any new
803
+ # gradient Tensors from `AccumulateGrad` ops.
804
+ for event in self._op_tree.dfs():
805
+ for _, p_grad in extract_gradients(event):
806
+ self._categories.set_by_id(p_grad, Category.GRADIENT)
807
+
808
+ # Similarly, temporary Tensors are easy to identify and are useful to
809
+ # flag since they can make memory use "spikier" than one would
810
+ # otherwise expect.
811
+ for node in self._data_flow_graph.flow_nodes:
812
+ for i in node.intermediates:
813
+ self._categories.set_by_key(i, Category.TEMPORARY)
814
+
815
+ def _set_parameters_using_python_tracer(self) -> None:
816
+ for event in self._op_tree.dfs():
817
+ for p in extract_parameters(event):
818
+ if p is not None:
819
+ self._categories.set_by_id(p, Category.PARAMETER)
820
+
821
+ def _set_inputs(self) -> None:
822
+ """Mark inputs based on which Tensors are updated using gradients.
823
+
824
+ The process for differentiating between inputs and activations is more
825
+ involved. Most Tensors in a training loop depend on at least one
826
+ gradient: parameters depend on them through updates, and activations
827
+ and optimizer state depend on them transitively through parameters.
828
+ Critically, we do not need to know which Tensors are parameters to
829
+ apply this method; we can simply walk the data flow graph to build the
830
+ set of all values which depend on a gradient and then obtain the set
831
+ of inputs from the conjugate set.
832
+
833
+ There is, however, one hiccup. The first time we see a parameter is
834
+ generally on the forward pass of the first step. We know from
835
+ inspection of the data flow graph that v1 of that Tensor depends on
836
+ a gradient (provided we profile an optimizer step), but not v0. To
837
+ address this problem we weaken the definition of "depends on a
838
+ gradient" to "any version of this Tensor depends on a gradient",
839
+ which in turn strengthens the criteria for the input set enough to
840
+ filter the activations in the forward pass of the first step."""
841
+
842
+ # All of this analysis is predicated on using at least one training
843
+ # step (or parameters from the python tracer) to partition the graph.
844
+ # Absent that we cannot determine which Tensors are inputs and which
845
+ # ones are part of the model.
846
+ depends_on_gradient = self._any_version_depends_on_gradient()
847
+
848
+ # We only want to annotate Tensors which actually contribute to the
849
+ # model calculation.
850
+ produces_gradient: Set[TensorAndID] = set()
851
+ for node in reversed(self._data_flow_graph.flow_nodes):
852
+ tensors = {(key, version) for key, (_, version) in node.inputs.items()}
853
+ tensors |= node.outputs.items()
854
+ if any(
855
+ self._categories.get(*i) in (Category.GRADIENT, Category.PARAMETER)
856
+ or i in produces_gradient
857
+ for i in tensors
858
+ ):
859
+ produces_gradient |= tensors
860
+
861
+ # Don't include Tensors created in the backward pass, as these are
862
+ # generally Autograd implementation details rather than proper inputs.
863
+ input_candidates = produces_gradient.copy()
864
+ for node in self._data_flow_graph.flow_nodes:
865
+ if RecordScope.BACKWARD_FUNCTION in get_scopes(node._event):
866
+ input_candidates -= set(node.outputs.items())
867
+
868
+ for key, version in input_candidates:
869
+ if key.id not in depends_on_gradient:
870
+ self._categories.setdefault_by_version(key, version, Category.INPUT)
871
+
872
+ def _set_parameters_using_data_flow(self) -> None:
873
+ """Deduce which Tensors are parameters.
874
+
875
+ Consider the following code for the step of SGD with momentum
876
+ (nesterov=False), where `d_p` is the gradient of `param` and `buf` is
877
+ the momentum buffer.
878
+ ```
879
+ buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
880
+ d_p = buf
881
+ param.add_(d_p, alpha=-lr)
882
+ ```
883
+ Both `param` and `buf` take a gradient and perform an in-place update.
884
+
885
+ The python tracer will inspect calls to `nn.Module.forward` and
886
+ `optim.Optimizer.step` to extract parameter and optimizer state
887
+ respectively (including parameters), so this is generally a non-issue.
888
+
889
+ However as a fallback we can also exploit several properties of
890
+ parameters to distinguish them from other model state.
891
+
892
+ First, they are directly used in the forward pass. (At this point we
893
+ haven't established which parts of the graph correspond to the forward
894
+ pass but we can deduce enough to suffice.) Some mutable state such as
895
+ batch norm moving averages also contribute to the forward pass, but
896
+ optimizer state does not.
897
+
898
+ Second, a parameter is by definition used to compute at least one
899
+ gradient and depends on at least one gradient.
900
+ """
901
+ snapshot = self._category_snapshot()
902
+
903
+ # Determine which Tensors might be parameters based on forward pass
904
+ # data flow. Note this these are only candidates; we filter nodes that
905
+ # we know are part of the backward pass but that doesn't guarantee that
906
+ # they are part of the forward pass.
907
+ candidate_parameters: Set[TensorAndID] = set()
908
+ candidate_fwd_tensors: Set[TensorAndID] = {
909
+ i for i, category in snapshot.items() if category == Category.INPUT
910
+ }
911
+
912
+ for node in self._data_flow_graph.flow_nodes:
913
+ inputs = {(key, value) for key, (_, value) in node.inputs.items()}
914
+ if (
915
+ # Don't check nodes in the backward pass.
916
+ RecordScope.BACKWARD_FUNCTION not in get_scopes(node._event)
917
+ and not any(self._is_gradient(*i) for i in inputs)
918
+ and not any(self._is_gradient(*i) for i in node.outputs.items())
919
+ #
920
+ # and only check nodes which depend on an input.
921
+ and candidate_fwd_tensors.intersection(inputs)
922
+ ):
923
+ candidate_fwd_tensors |= node.outputs.items()
924
+ candidate_parameters |= inputs.difference(candidate_fwd_tensors)
925
+
926
+ # Require that each parameter eventually contributes to the value of a gradient
927
+ used_for_gradient: Set[TensorAndID] = set()
928
+ for node in reversed(self._data_flow_graph.flow_nodes):
929
+ if any(
930
+ self._is_gradient(*i) or i in used_for_gradient
931
+ for i in node.outputs.items()
932
+ ):
933
+ for key, (_, version) in node.inputs.items():
934
+ used_for_gradient.add((key, version))
935
+ candidate_parameters.intersection_update(used_for_gradient)
936
+
937
+ # and depends on a gradient.
938
+ parameter_keys = {key.id for key, _ in candidate_parameters}
939
+ parameter_keys &= self._any_version_depends_on_gradient()
940
+
941
+ for key, _ in snapshot.keys():
942
+ if key.id in parameter_keys:
943
+ self._categories.set_by_id(key, Category.PARAMETER)
944
+
945
+ def _set_activations(self) -> None:
946
+ """Flood the graph to identify activations."""
947
+
948
+ required = {Category.INPUT, Category.ACTIVATION}
949
+ also_allowed = {Category.PARAMETER, Category.TEMPORARY}
950
+ for node in self._data_flow_graph.flow_nodes:
951
+ inputs = {(key, value) for key, (_, value) in node.inputs.items()}
952
+ input_categories = {self._categories.get(*i) for i in inputs}
953
+
954
+ if (
955
+ (input_categories & required)
956
+ and not (input_categories - (required | also_allowed))
957
+ #
958
+ # Stop filling when we reach the backward pass.
959
+ and RecordScope.BACKWARD_FUNCTION not in get_scopes(node._event)
960
+ ):
961
+ for i in node.outputs.items():
962
+ self._categories.setdefault_by_version(*i, Category.ACTIVATION)
963
+
964
+ def _set_optimizer_state(self) -> None:
965
+ for event in self._op_tree.dfs():
966
+ if event.typed[0] == _EventType.PyCall and event.typed[1].optimizer:
967
+ parameters = event.typed[1].optimizer.parameters
968
+ for _, t in it.chain(*[state for _, _, state in parameters]):
969
+ key = TensorKey.from_tensor(t)
970
+ if key is not None:
971
+ self._categories.set_by_id(key, Category.OPTIMIZER_STATE)
972
+
973
+ def _set_autograd_detail(self):
974
+ prior = {None, Category.AUTOGRAD_DETAIL}
975
+ for node in self._data_flow_graph.flow_nodes:
976
+ if RecordScope.BACKWARD_FUNCTION in get_scopes(node._event):
977
+ for key, version in node.outputs.items():
978
+ if version == 0 or self._categories.get(key, version - 1) in prior:
979
+ self._categories.setdefault_by_version(
980
+ key, version, Category.AUTOGRAD_DETAIL
981
+ )
982
+
983
+
984
+ class MemoryProfileTimeline:
985
+ def __init__(self, memory_profile):
986
+ """The minimum representation of the memory profile timeline
987
+ includes the memory timeline and categories. The timeline
988
+ consists of [timestamp, action, (TensorKey, version), numbytes]
989
+ elements, to denote any actions (pre-existing, create, destroy,
990
+ or increment_version) that occurred to a specific Tensor for a
991
+ chunk of memory. The categories help map each (TensorKey,
992
+ version) pair into a category."""
993
+ self.timeline = memory_profile.timeline
994
+ self.categories = memory_profile._categories
995
+
996
+ def _coalesce_timeline(self, device_str):
997
+ """Convert the memory timeline and categories into a memory plot
998
+ consisting of timestamps and their respective sizes by category
999
+ for a given device.
1000
+
1001
+ Input: device
1002
+ Output: [timestamps, sizes by category]
1003
+ """
1004
+ device = torch.device(device_str)
1005
+ times: List[int] = []
1006
+ sizes: List[List[int]] = []
1007
+
1008
+ def update(key, version, delta):
1009
+ category = (
1010
+ self.categories.get(key, version)
1011
+ if isinstance(key, TensorKey)
1012
+ else None
1013
+ )
1014
+ index = _CATEGORY_TO_INDEX[category] + 1
1015
+ sizes[-1][index] += int(delta)
1016
+
1017
+ t_min = -1
1018
+ for t, action, (key, version), numbytes in self.timeline:
1019
+ if key.device != device:
1020
+ continue
1021
+
1022
+ # Convert timestamps from ns to us, to match trace events.
1023
+ if t != -1:
1024
+ t = int(t / 1000)
1025
+
1026
+ # Save the smallest timestamp to populate pre-existing allocs.
1027
+ if t_min == -1 or (t < t_min and t > 0):
1028
+ t_min = t
1029
+
1030
+ # Handle timestamp
1031
+ if len(times) == 0:
1032
+ times.append(t)
1033
+ sizes.append([0] + [0 for _ in _CATEGORY_TO_INDEX])
1034
+
1035
+ elif t != times[-1]:
1036
+ times.append(t)
1037
+ sizes.append(sizes[-1].copy())
1038
+
1039
+ # Handle memory and categories
1040
+ if action in (Action.PREEXISTING, Action.CREATE):
1041
+ update(key, version, numbytes)
1042
+
1043
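+ # An INCREMENT_VERSION is modeled as moving the allocation's bytes from
+ # the old (key, version) category to the new (key, version + 1) category.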
+ elif action == Action.INCREMENT_VERSION:
1044
+ update(key, version, -numbytes)
1045
+ update(key, version + 1, numbytes)
1046
+
1047
+ elif action == Action.DESTROY:
1048
+ update(key, version, -numbytes)
1049
+
1050
+ else:
1051
+ raise ValueError(f"Unknown action: {action}")
1052
+
1053
+ times = [t_min if t < 0 else t for t in times]
1054
+ return times, sizes
1055
+
1056
+ def export_memory_timeline(self, path, device_str) -> None:
1057
+ """Saves the memory timeline as [times, sizes by category]
1058
+ as a JSON formatted file to the given path for the given
1059
+ device."""
1060
+ times, sizes = self._coalesce_timeline(device_str)
1061
+ # TODO: Write a faster serialize (orjson not available in CI)
1062
+ import json
1063
+
1064
+ with open(path, "w") as f:
1065
+ json.dump([times, sizes], f)
1066
+
1067
+ def export_memory_timeline_raw(self, path, device_str) -> None:
1068
+ """Saves the memory timeline as raw memory event tuples in the
1069
+ form of (timestamp, action, numbytes, category)
1070
+ as a JSON formatted file to the given path for the given
1071
+ device."""
1072
+ device = torch.device(device_str)
1073
+ raw_events: List[Tuple[int, int, int, int]] = []
1074
+
1075
+ def get_category_index(key, version):
1076
+ category = (
1077
+ self.categories.get(key, version)
1078
+ if isinstance(key, TensorKey)
1079
+ else None
1080
+ )
1081
+ return _CATEGORY_TO_INDEX[category]
1082
+
1083
+ for t, action, (key, version), numbytes in self.timeline:
1084
+ if key.device != device:
1085
+ continue
1086
+
1087
+ if action in (Action.PREEXISTING, Action.CREATE):
1088
+ raw_events.append(
1089
+ (
1090
+ t,
1091
+ _ACTION_TO_INDEX[action],
1092
+ numbytes,
1093
+ get_category_index(key, version),
1094
+ )
1095
+ )
1096
+
1097
+ elif action == Action.INCREMENT_VERSION:
1098
+ raw_events.append(
1099
+ (
1100
+ t,
1101
+ _ACTION_TO_INDEX[action],
1102
+ -numbytes,
1103
+ get_category_index(key, version),
1104
+ )
1105
+ )
1106
+ raw_events.append(
1107
+ (
1108
+ t,
1109
+ _ACTION_TO_INDEX[action],
1110
+ numbytes,
1111
+ get_category_index(key, version + 1),
1112
+ )
1113
+ )
1114
+
1115
+ elif action == Action.DESTROY:
1116
+ raw_events.append(
1117
+ (
1118
+ t,
1119
+ _ACTION_TO_INDEX[action],
1120
+ -numbytes,
1121
+ get_category_index(key, version),
1122
+ )
1123
+ )
1124
+
1125
+ else:
1126
+ raise ValueError(f"Unknown action: {action}")
1127
+
1128
+ import json
1129
+
1130
+ with open(path, "w") as f:
1131
+ json.dump(raw_events, f)
1132
+
1133
+ def export_memory_timeline_html(
1134
+ self, path, device_str, figsize=(20, 12), title=None
1135
+ ) -> None:
1136
+ """Exports the memory timeline as an HTML file which contains
1137
+ the memory timeline plot embedded as a PNG file."""
1138
+ # Check if user has matplotlib installed, return gracefully if not.
1139
+ import importlib.util
1140
+
1141
+ matplotlib_spec = importlib.util.find_spec("matplotlib")
1142
+ if matplotlib_spec is None:
1143
+ print(
1144
+ "export_memory_timeline_html failed because matplotlib was not found."
1145
+ )
1146
+ return
1147
+
1148
+ from base64 import b64encode
1149
+ from os import remove
1150
+ from tempfile import NamedTemporaryFile
1151
+
1152
+ import matplotlib.pyplot as plt
1153
+ import numpy as np
1154
+
1155
+ mt = self._coalesce_timeline(device_str)
1156
+ times, sizes = np.array(mt[0]), np.array(mt[1])
1157
+ # For this timeline, start at 0 to match Chrome traces.
1158
+ t_min = min(times)
1159
+ times -= t_min
1160
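+ # Stack the per-category sizes cumulatively and convert bytes to GiB for the plot.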
+ stacked = np.cumsum(sizes, axis=1) / 1024**3
1161
+ device = torch.device(device_str)
1162
+ max_memory_allocated = torch.cuda.max_memory_allocated(device)
1163
+ max_memory_reserved = torch.cuda.max_memory_reserved(device)
1164
+
1165
+ # Plot memory timeline as stacked data
1166
+ fig = plt.figure(figsize=figsize, dpi=80)
1167
+ axes = fig.gca()
1168
+ for category, color in _CATEGORY_TO_COLORS.items():
1169
+ i = _CATEGORY_TO_INDEX[category]
1170
+ axes.fill_between(
1171
+ times / 1e3, stacked[:, i], stacked[:, i + 1], color=color, alpha=0.7
1172
+ )
1173
+ fig.legend(["Unknown" if i is None else i.name for i in _CATEGORY_TO_COLORS])
1174
+ # Usually training steps are on the order of milliseconds.
1175
+ axes.set_xlabel("Time (ms)")
1176
+ axes.set_ylabel("Memory (GB)")
1177
+ title = "\n\n".join(
1178
+ ([title] if title else [])
1179
+ + [
1180
+ f"Max memory allocated: {max_memory_allocated/(1024**3):.2f} GiB \n"
1181
+ f"Max memory reserved: {max_memory_reserved/(1024**3):.2f} GiB"
1182
+ ]
1183
+ )
1184
+ axes.set_title(title)
1185
+
1186
+ # Embed the memory timeline image into the HTML file
1187
+ tmpfile = NamedTemporaryFile("wb", suffix=".png", delete=False)
1188
+ tmpfile.close()
1189
+ fig.savefig(tmpfile.name, format="png")
1190
+
1191
+ with open(tmpfile.name, "rb") as tmp:
1192
+ encoded = b64encode(tmp.read()).decode("utf-8")
1193
+ html = f"""<html>
1194
+ <head><meta charset="utf-8" /><title>GPU Memory Timeline HTML</title></head>
1195
+ <body>
1196
+ <img src='data:image/png;base64,{encoded}'>
1197
+ </body>
1198
+ </html>"""
1199
+
1200
+ with open(path, "w") as f:
1201
+ f.write(html)
1202
+ remove(tmpfile.name)
venv/lib/python3.10/site-packages/torch/profiler/_pattern_matcher.py ADDED
@@ -0,0 +1,662 @@
1
+ import json
2
+ import math
3
+ import os
4
+ import re
5
+ from typing import Dict, List, Optional, Set
6
+
7
+ import torch
8
+ import torch.utils.benchmark as benchmark
9
+ from torch._C._profiler import (
10
+ _EventType,
11
+ _ExtraFields_PyCall,
12
+ _ExtraFields_PyCCall,
13
+ _ExtraFields_TorchOp,
14
+ _ProfilerEvent,
15
+ )
16
+ from torch.profiler import profile
17
+ from torch.profiler._utils import index_of_first_match, traverse_bfs, traverse_dfs
18
+
19
+
20
+ class Pattern:
21
+ """
22
+ Base class for all patterns, subclass this class and implement match()
23
+ to define custom patterns.
24
+
25
+ In a subclass, define the description and the skip property.
26
+ """
27
+
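+ # Illustrative sketch (not part of the shipped API): a typical subclass sets
+ # self.name / self.description in __init__ and overrides match(), e.g.
+ #     class MyMatMulPattern(Pattern):
+ #         def match(self, event):
+ #             return event.name == "aten::mm"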
28
+ def __init__(self, prof: profile, should_benchmark: bool = False):
29
+ self.prof = prof
30
+ self.should_benchmark = should_benchmark
31
+ self.name = "Please specify a name for pattern"
32
+ self.description = "Please specify a description for pattern"
33
+ self.url = ""
34
+ assert prof.profiler is not None and prof.profiler.kineto_results is not None
35
+ self.event_tree = prof.profiler.kineto_results.experimental_event_tree()
36
+ self.tid_root: Dict[int, List[_ProfilerEvent]] = {}
37
+ for event in self.event_tree:
38
+ self.tid_root.setdefault(event.start_tid, []).append(event)
39
+
40
+ @property
41
+ def skip(self):
42
+ return False
43
+
44
+ def report(self, event: _ProfilerEvent):
45
+ msg = (
46
+ f"{self.description}\n[Source Code Location] {source_code_location(event)}"
47
+ )
48
+ return msg
49
+
50
+ def eventTreeTraversal(self):
51
+ """
52
+ Traverse the event tree and yield all events.
53
+ Override this method in subclass to customize the traversal.
54
+ """
55
+ yield from traverse_dfs(self.event_tree)
56
+
57
+ def summary(self, events: List[_ProfilerEvent]):
58
+ default_summary = f"{self.name}: {len(events)} events matched."
59
+ if self.should_benchmark:
60
+ # If the subclass implements benchmark(), use its benchmark summary.
61
+ return (
62
+ self.benchmark_summary(events)
63
+ if hasattr(self, "benchmark") # type: ignore[attr-defined]
64
+ else default_summary
65
+ )
66
+ return default_summary
67
+
68
+ def benchmark_summary(self, events: List[_ProfilerEvent]):
69
+ def format_time(time_ns: int):
70
+ unit_lst = ["ns", "us", "ms"]
71
+ for unit in unit_lst:
72
+ if time_ns < 1000:
73
+ return f"{time_ns:.2f} {unit}"
74
+ time_ns //= 1000
75
+ return f"{time_ns:.2f} s"
76
+
77
+ assert hasattr(self, "benchmark"), "Please implement benchmark()"
78
+ shapes_factor_map = self.benchmark(events) # type: ignore[attr-defined]
79
+ original_time = sum(event.duration_time_ns for event in events)
80
+ new_time = sum(
81
+ shapes_factor_map[input_shapes(event)] * event.duration_time_ns
82
+ for event in events
83
+ )
84
+ return (
85
+ f"{self.name}: {len(events)} events matched. "
86
+ f"Total Estimated Speedup: {format_time(original_time - new_time)} ({round(original_time/new_time, 2)}X)"
87
+ )
88
+
89
+ def match(self, event: _ProfilerEvent):
90
+ """
91
+ Return True if the event matches the pattern.
92
+ This method should be overridden in a subclass.
93
+ """
94
+ raise NotImplementedError
95
+
96
+ def matched_events(self):
97
+ if self.skip:
98
+ return []
99
+ matched_events = []
100
+ for event in self.eventTreeTraversal():
101
+ if self.match(event):
102
+ matched_events.append(event)
103
+ return matched_events
104
+
105
+ def root_of(self, event: _ProfilerEvent):
106
+ while event.parent:
107
+ event = event.parent
108
+ return event
109
+
110
+ def siblings_of(self, event: _ProfilerEvent):
111
+ if event.parent:
112
+ children = event.parent.children
113
+ else:
114
+ children = self.tid_root[event.start_tid]
115
+ index = children.index(event)
116
+ return children[:index], children[index + 1 :]
117
+
118
+ def next_of(self, event: _ProfilerEvent):
119
+ _, next_events = self.siblings_of(event)
120
+ return next_events[0] if next_events else None
121
+
122
+ def prev_of(self, event: _ProfilerEvent):
123
+ prev_events, _ = self.siblings_of(event)
124
+ return prev_events[-1] if prev_events else None
125
+
126
+ def go_up_until(self, event: _ProfilerEvent, predicate):
127
+ if not event:
128
+ return None
129
+ while event.parent and not predicate(event):
130
+ event = event.parent
131
+ return event
132
+
133
+
134
+ # Patterns
135
+
136
+
137
+ class NamePattern(Pattern):
138
+ def __init__(self, prof: profile, name: str, should_benchmark: bool = False):
139
+ super().__init__(prof, should_benchmark)
140
+ self.description = f"Matched Name Event: {name}"
141
+ self.name = name
142
+
143
+ def match(self, event: _ProfilerEvent):
144
+ return re.search(self.name, event.name) is not None
145
+
146
+
147
+ class ExtraCUDACopyPattern(Pattern):
148
+ """
149
+ This pattern identifies if we create a constant tensor on CPU and immediately move it to GPU.
150
+ example: torch.zeros((100, 100)).to("cuda")
151
+
152
+ Pattern:
153
+ built-in method | built-in method
154
+ ... | aten::to
155
+ aten::fill_/aten::zero_ | aten::_to_copy
156
+
157
+ Algorithm:
158
+ We start at the aten::to node, go to its parent's previous sibling,
159
+ and check whether we find an aten::fill_/aten::zero_ as we keep going down the tree.
160
+ We always select the last child in the children list when we go down the tree.
161
+ If any of these steps fails, it is not a match.
162
+ """
163
+
164
+ def __init__(self, prof: profile, should_benchmark: bool = False):
165
+ super().__init__(prof, should_benchmark)
166
+ self.name = "Extra CUDA Copy Pattern"
167
+ self.description = "Filled a CPU tensor and immediately moved it to GPU. Please initialize it on GPU."
168
+ self.url = "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#create-tensors-directly-on-the-target-device"
169
+ self.init_ops = {
170
+ "aten::fill_",
171
+ "aten::zero_",
172
+ "aten::normal_",
173
+ "aten::uniform_",
174
+ }
175
+
176
+ @property
177
+ def skip(self):
178
+ return not self.prof.with_stack or not self.prof.record_shapes
179
+
180
+ def match(self, event):
181
+ # TODO: We should also check tensor identities
182
+ if event.name != "aten::to":
183
+ return False
184
+ to_event = event
185
+ if not event.children:
186
+ return False
187
+ event = event.children[-1]
188
+ if event.name != "aten::_to_copy":
189
+ return False
190
+ if not event.children:
191
+ return False
192
+ event = event.children[-1]
193
+ if event.name != "aten::copy_":
194
+ return False
195
+ # aten::copy_ should have the first 2 args dtype the same
196
+ dtypes = input_dtypes(event)
197
+ if len(dtypes) < 2:
198
+ return False
199
+ if dtypes[0] is None or dtypes[0] != dtypes[1]:
200
+ return False
201
+ event = to_event
202
+ # Up one level
203
+ event = event.parent
204
+ if event is None:
205
+ return False
206
+ # Check if we have an aten::fill_ in the previous leaf
207
+ event = self.prev_of(event)
208
+ if event is None:
209
+ return False
210
+ while event.children:
211
+ event = event.children[-1]
212
+ # aten::zero_ is a special optimization case where fill_ is not called
213
+ if event.name in self.init_ops:
214
+ return True
215
+ return event.name in self.init_ops
216
+ # TODO: Check if tensor is reused
217
+
218
+ def benchmark(self, events: List[_ProfilerEvent]):
219
+ shapes_factor_map = {input_shapes(event): 0.0 for event in events}
220
+ for shape in shapes_factor_map:
221
+ size = shape[0]
222
+ to_timer = benchmark.Timer(
223
+ stmt='torch.ones(size).to("cuda")', globals={"size": size}
224
+ )
225
+ de_timer = benchmark.Timer(
226
+ stmt='torch.ones(size, device="cuda")', globals={"size": size}
227
+ )
228
+ to_time = to_timer.timeit(10).mean
229
+ de_time = de_timer.timeit(10).mean
230
+ shapes_factor_map[shape] = de_time / to_time
231
+ return shapes_factor_map
232
+
233
+
234
+ class ForLoopIndexingPattern(Pattern):
235
+ """
236
+ This pattern identifies if we use a for loop to index a tensor that
237
+ can be vectorized.
238
+ example:
239
+ tensor = torch.empty((100, 100))
240
+ for i in range(100):
241
+ tensor[i] = i
242
+
243
+ Pattern:
244
+ aten::select | ... | aten::select | ... (Repeat)
245
+
246
+ Algorithm:
247
+ We start at the aten::select node and check whether we can find this alternating pattern.
248
+ We also keep a visited set to avoid matching the same loop more than once.
249
+ """
250
+
251
+ def __init__(self, prof: profile, should_benchmark: bool = False):
252
+ super().__init__(prof, should_benchmark)
253
+ self.name = "For Loop Indexing Pattern"
254
+ self.description = "For loop indexing detected. Vectorization recommended."
255
+ self.visited: Set[int] = set()
256
+
257
+ def eventTreeTraversal(self):
258
+ """
259
+ We need to use BFS traversal order to avoid duplicate match.
260
+ """
261
+ yield from traverse_bfs(self.event_tree)
262
+
263
+ def match(self, event: _ProfilerEvent):
264
+ if event.name != "aten::select":
265
+ return False
266
+ if event.id in self.visited:
267
+ return False
268
+ repeat_count = 1
269
+ _, next = self.siblings_of(event)
270
+ if len(next) <= 1:
271
+ return False
272
+
273
+ # Custom event list matching
274
+ def same_ops(list1, list2):
275
+ if len(list1) != len(list2):
276
+ return False
277
+ for op1, op2 in zip(list1, list2):
278
+ if op1.name != op2.name:
279
+ return False
280
+ return True
281
+
282
+ # Record the ops between two aten::select
283
+ next_select_idx = index_of_first_match(next, lambda e: e.name == "aten::select")
284
+ if next_select_idx is None:
285
+ return False
286
+ indexing_ops = [event] + next[:next_select_idx]
287
+ next = next[len(indexing_ops) - 1 :]
288
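+ # Compare successive, equally sized chunks of siblings against the recorded
+ # op group; each matching chunk counts as one more iteration of the suspected loop.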
+ for i in range(0, len(next), len(indexing_ops)):
289
+ if same_ops(indexing_ops, next[i : i + len(indexing_ops)]):
290
+ repeat_count += 1
291
+ self.visited.add(next[i].id)
292
+ else:
293
+ break
294
+ return repeat_count >= 10
295
+
296
+
297
+ class FP32MatMulPattern(Pattern):
298
+ def __init__(self, prof: profile, should_benchmark: bool = False):
299
+ super().__init__(prof, should_benchmark)
300
+ self.name = "FP32 MatMul Pattern"
301
+ self.description = (
302
+ "You are currently using GPU that supports TF32. "
303
+ "Please enable TF32 by setting 'torch.backends.cuda.matmul.allow_tf32 = True'"
304
+ )
305
+ self.url = "https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
306
+
307
+ @property
308
+ def skip(self):
309
+ if torch.version.hip is not None:
310
+ has_tf32 = False
311
+ else:
312
+ # Anything below sm_80 predates Ampere and therefore does not support TF32
313
+ has_tf32 = all(int(arch[3:]) >= 80 for arch in torch.cuda.get_arch_list())
314
+ return has_tf32 is False or super().skip or not self.prof.record_shapes
315
+
316
+ def match(self, event: _ProfilerEvent):
317
+ # If we saw this pattern once, we don't need to match it again
318
+ if event.tag != _EventType.TorchOp:
319
+ return False
320
+ assert isinstance(event.extra_fields, _ExtraFields_TorchOp)
321
+ if event.name == "aten::mm":
322
+ if event.extra_fields.allow_tf32_cublas is False:
323
+ return True
324
+ return False
325
+
326
+ def report(self, event: _ProfilerEvent):
327
+ return self.description
328
+
329
+ def benchmark(self, events: List[_ProfilerEvent]):
330
+ shapes_factor_map = {input_shapes(event): 0.0 for event in events}
331
+ for shape in shapes_factor_map:
332
+ matrixA = torch.randn(shape[0], device="cuda", dtype=torch.float32)
333
+ matrixB = torch.randn(shape[1], device="cuda", dtype=torch.float32)
334
+ fp32_timer = benchmark.Timer(
335
+ stmt="torch.mm(matrixA, matrixB)",
336
+ globals={"matrixA": matrixA, "matrixB": matrixB},
337
+ )
338
+ tf32_timer = benchmark.Timer(
339
+ stmt="torch.mm(matrixA, matrixB)",
340
+ setup="torch.backends.cuda.matmul.allow_tf32 = True",
341
+ globals={"matrixA": matrixA, "matrixB": matrixB},
342
+ )
343
+ torch.backends.cuda.matmul.allow_tf32 = False
344
+ fp32_time = fp32_timer.timeit(10).mean
345
+ tf32_time = tf32_timer.timeit(10).mean
346
+ shapes_factor_map[shape] = tf32_time / fp32_time
347
+ return shapes_factor_map
348
+
349
+
350
+ class OptimizerSingleTensorPattern(Pattern):
351
+ """
352
+ This pattern identifies if we are using the single-tensor version of an optimizer.
353
+ example:
354
+ optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
355
+ By adding foreach=True to enable the multi-tensor optimizer, we can gain a speedup when
356
+ the kernels are relatively small.
357
+
358
+ Pattern:
359
+ XXXXX: _single_tensor_<OPTIMIZER_NAME>
360
+
361
+ Algorithm:
362
+ String match
363
+ """
364
+
365
+ def __init__(self, prof: profile, should_benchmark: bool = False):
366
+ super().__init__(prof, should_benchmark)
367
+ self.name = "Optimizer Single Tensor Pattern"
368
+ self.optimizers_with_foreach = ["adam", "sgd", "adamw"]
369
+ self.description = (
370
+ "Detected optimizer running with single tensor implementation. "
371
+ "Please enable multi tensor implementation by passing 'foreach=True' into optimizer."
372
+ )
373
+ self.url = ""
374
+
375
+ def match(self, event: _ProfilerEvent):
376
+ for optimizer in self.optimizers_with_foreach:
377
+ if event.name.endswith(f"_single_tensor_{optimizer}"):
378
+ return True
379
+ return False
380
+
381
+
382
+ class SynchronizedDataLoaderPattern(Pattern):
383
+ """
384
+ This pattern identifies if we are using num_workers=0 in DataLoader.
385
+ example:
386
+ torch.utils.data.DataLoader(dataset, batch_size=batch_size)
387
+ Add num_workers=N to the arguments. N depends on system configuration.
388
+
389
+ Pattern:
390
+ dataloader.py(...): __iter__
391
+ dataloader.py(...): _get_iterator
392
+ NOT dataloader.py(...): check_worker_number_rationality
393
+
394
+ Algorithm:
395
+ If we don't see a check_worker_number_rationality call in the dataloader __iter__,
396
+ it is not an asynchronous dataloader.
397
+
398
+ """
399
+
400
+ def __init__(self, prof: profile, should_benchmark: bool = False):
401
+ super().__init__(prof, should_benchmark)
402
+ self.name = "Synchronized DataLoader Pattern"
403
+ self.description = (
404
+ "Detected DataLoader running with synchronized implementation. "
405
+ "Please enable asynchronous dataloading by setting num_workers > 0 when initializing DataLoader."
406
+ )
407
+ self.url = (
408
+ "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html"
409
+ "#enable-async-data-loading-and-augmentation"
410
+ )
411
+
412
+ def match(self, event: _ProfilerEvent):
413
+ def is_dataloader_function(name: str, function_name: str):
414
+ return name.startswith(
415
+ os.path.join("torch", "utils", "data", "dataloader.py")
416
+ ) and name.endswith(function_name)
417
+
418
+ # TODO: fixme! Due to lifetime issues of the function name, this field might
419
+ # actually point to an already freed string when the event is a PyCall.
420
+ # Just silently skip this to unblock testing.
421
+ try:
422
+ event.name
423
+ except UnicodeDecodeError:
424
+ return False
425
+
426
+ if not is_dataloader_function(event.name, "__iter__"):
427
+ return False
428
+ if not event.children:
429
+ return False
430
+ event = event.children[0]
431
+ if not is_dataloader_function(event.name, "_get_iterator"):
432
+ return False
433
+ if not event.children:
434
+ return False
435
+ event = event.children[0]
436
+ return not is_dataloader_function(event.name, "check_worker_number_rationality")
437
+ # TODO: We should also check if the loader is bottleneck.
438
+
439
+
440
+ class GradNotSetToNonePattern(Pattern):
441
+ """
442
+ This pattern identifies if we are not setting grad to None in zero_grad.
443
+ example:
444
+ optimizer.zero_grad()
445
+ By setting set_to_none=True, we can gain a speedup
446
+
447
+ Pattern:
448
+ XXXXX: _zero_grad
449
+ NOT aten::zeros
450
+ aten::zero_
451
+
452
+ aten::zero_ is called on each parameter in the model.
453
+ We also want to make sure it is not called by aten::zeros.
454
+
455
+ Algorithm:
456
+ String match
457
+ """
458
+
459
+ def __init__(self, prof: profile, should_benchmark: bool = False):
460
+ super().__init__(prof, should_benchmark)
461
+ self.name = "Gradient Set To Zero Instead of None Pattern"
462
+ self.description = (
463
+ "Detected gradient set to zero instead of None. "
464
+ "Please add 'set_to_none=True' when calling zero_grad()."
465
+ )
466
+ self.url = (
467
+ "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html"
468
+ "#disable-gradient-calculation-for-validation-or-inference"
469
+ )
470
+
471
+ def match(self, event: _ProfilerEvent):
472
+ if not event.name.endswith(": zero_grad"):
473
+ return False
474
+ if not event.children:
475
+ return False
476
+
477
+ for sub_event in traverse_dfs(event.children):
478
+ if (
479
+ sub_event.name == "aten::zero_"
480
+ and sub_event.parent.name != "aten::zeros"
481
+ ):
482
+ return True
483
+ # TODO: We should also check if the optimizer's numerical behavior will change.
484
+ return False
485
+
486
+
487
+ class Conv2dBiasFollowedByBatchNorm2dPattern(Pattern):
488
+ """
489
+ This pattern identifies if we are enabling bias in Conv2d which is followed by BatchNorm2d.
490
+ Bias doesn't do anything when followed by batchnorm.
491
+ Pattern:
492
+ nn.Module: Conv2d | nn.Module: BatchNorm2d
493
+ ...
494
+ aten::conv2d AND dtype of third argument is not null
495
+ The third argument is the bias
496
+ Algorithm:
497
+ String match
498
+ """
499
+
500
+ def __init__(self, prof: profile, should_benchmark: bool = False):
501
+ super().__init__(prof, should_benchmark)
502
+ self.name = "Enabling Bias in Conv2d Followed By BatchNorm Pattern"
503
+ self.description = "Detected bias enabled in Conv2d that is followed by BatchNorm2d. Please set 'bias=False' in Conv2d."
504
+ self.url = (
505
+ "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html"
506
+ "#disable-bias-for-convolutions-directly-followed-by-a-batch-norm"
507
+ )
508
+
509
+ @property
510
+ def skip(self):
511
+ return self.prof.record_shapes is False or super().skip
512
+
513
+ def match(self, event: _ProfilerEvent):
514
+ if event.name != "aten::conv2d":
515
+ return False
516
+ if len(input_dtypes(event)) < 3 or input_dtypes(event)[2] is None:
517
+ return False
518
+ # This means bias=True
519
+ event = self.go_up_until(
520
+ event, lambda e: e.name.startswith("nn.Module: Conv2d")
521
+ )
522
+ if not event:
523
+ return False
524
+ event = self.next_of(event)
525
+ if not event:
526
+ return False
527
+ return event.name.startswith("nn.Module: BatchNorm2d")
528
+
529
+
530
+ class MatMulDimInFP16Pattern(Pattern):
531
+ def __init__(self, prof: profile, should_benchmark: bool = False):
532
+ super().__init__(prof, should_benchmark)
533
+ self.name = "Matrix Multiplication Dimension Not Aligned Pattern"
534
+ self.description = "Detected matmul with dimensions not aligned. Please use matmul with aligned dimensions."
535
+ self.url = "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#use-mixed-precision-and-amp"
536
+
537
+ @property
538
+ def skip(self):
539
+ return not self.prof.with_stack or not self.prof.record_shapes
540
+
541
+ def match(self, event: _ProfilerEvent):
542
+ def multiple_of(shapes, multiple):
543
+ return all(dim % multiple == 0 for shape in shapes for dim in shape[-2:])
544
+
545
+ if event.name not in ("aten::mm", "aten::bmm", "aten::addmm"):
546
+ return False
547
+ if not input_dtypes(event):
548
+ return False
549
+ arg_dtype = input_dtypes(event)[0]
550
+ if arg_dtype in (torch.bfloat16, torch.half) and not multiple_of(
551
+ input_shapes(event), 8
552
+ ):
553
+ return True
554
+ return False
555
+
556
+ def benchmark(self, events: List[_ProfilerEvent]):
557
+ def closest_multiple(shapes, multiple):
558
+ return [multiple * math.ceil(shape / multiple) for shape in shapes]
559
+
560
+ shapes_factor_map = {input_shapes(event): 0.0 for event in events}
561
+ for shape in shapes_factor_map:
562
+ matrixA = torch.randn(shape[0], device="cuda", dtype=torch.float16)
563
+ matrixB = torch.randn(shape[1], device="cuda", dtype=torch.float16)
564
+ not_aligned_dim_timer = benchmark.Timer(
565
+ stmt="torch.mm(matrixA, matrixB)",
566
+ globals={"matrixA": matrixA, "matrixB": matrixB},
567
+ )
568
+ matrixA = torch.randn(
569
+ closest_multiple(shape[0], 8), device="cuda", dtype=torch.float16
570
+ )
571
+ matrixB = torch.randn(
572
+ closest_multiple(shape[1], 8), device="cuda", dtype=torch.float16
573
+ )
574
+ aligned_dim_timer = benchmark.Timer(
575
+ stmt="torch.mm(matrixA, matrixB)",
576
+ globals={"matrixA": matrixA, "matrixB": matrixB},
577
+ )
578
+ not_aligned_dim_time = not_aligned_dim_timer.timeit(10).mean
579
+ aligned_dim_time = aligned_dim_timer.timeit(10).mean
580
+ shapes_factor_map[shape] = aligned_dim_time / not_aligned_dim_time
581
+ return shapes_factor_map
582
+
583
+
584
+ def source_code_location(event: Optional[_ProfilerEvent]):
585
+ while event:
586
+ if event.tag == _EventType.PyCall or event.tag == _EventType.PyCCall:
587
+ assert isinstance(
588
+ event.extra_fields, (_ExtraFields_PyCall, _ExtraFields_PyCCall)
589
+ )
590
+ if not event.extra_fields.caller.file_name.startswith("torch" + os.sep):
591
+ return f"{event.extra_fields.caller.file_name}:{event.extra_fields.caller.line_number}"
592
+ event = event.parent
593
+ return "No source code location found"
594
+
595
+
596
+ def input_shapes(event: _ProfilerEvent):
597
+ assert isinstance(event.extra_fields, _ExtraFields_TorchOp)
598
+ return tuple(tuple(getattr(i, "sizes", ())) for i in event.extra_fields.inputs)
599
+
600
+
601
+ def input_dtypes(event: _ProfilerEvent):
602
+ assert isinstance(event.extra_fields, _ExtraFields_TorchOp)
603
+ return tuple(getattr(i, "dtype", None) for i in event.extra_fields.inputs)
604
+
605
+
606
+ def report_all_anti_patterns(
607
+ prof,
608
+ should_benchmark: bool = False,
609
+ print_enable: bool = True,
610
+ json_report_dir: Optional[str] = None,
611
+ ):
612
+ report_dict: Dict = {}
613
+ anti_patterns = [
614
+ ExtraCUDACopyPattern(prof, should_benchmark),
615
+ # ForLoopIndexingPattern(prof, should_benchmark),
616
+ FP32MatMulPattern(prof, should_benchmark),
617
+ OptimizerSingleTensorPattern(prof, should_benchmark),
618
+ SynchronizedDataLoaderPattern(prof, should_benchmark),
619
+ GradNotSetToNonePattern(prof, should_benchmark),
620
+ Conv2dBiasFollowedByBatchNorm2dPattern(prof, should_benchmark),
621
+ MatMulDimInFP16Pattern(prof, should_benchmark),
622
+ ]
623
+ reported = set()
624
+ summaries = []
625
+ message_list = [f"{'-'*40}TorchTidy Report{'-'*40}"]
626
+ message_list.append("Matched Events:")
627
+
628
+ for anti_pattern in anti_patterns:
629
+ matched_events = anti_pattern.matched_events()
630
+ if not matched_events:
631
+ continue
632
+ summaries.append(anti_pattern.summary(matched_events))
633
+ for event in matched_events:
634
+ report_msg = anti_pattern.report(event)
635
+ if report_msg not in reported:
636
+ message_list.append(report_msg)
637
+ reported.add(report_msg)
638
+ src_location, line_no = source_code_location(event).split(":")
639
+ report_dict.setdefault(src_location, []).append(
640
+ {
641
+ "line_number": int(line_no),
642
+ "name": anti_pattern.name,
643
+ "url": anti_pattern.url,
644
+ "message": anti_pattern.description,
645
+ }
646
+ )
647
+
648
+ if json_report_dir is not None:
649
+ json_report_path = os.path.join(json_report_dir, "torchtidy_report.json")
650
+ if os.path.exists(json_report_path):
651
+ with open(json_report_path) as f:
652
+ existing_report = json.load(f)
653
+ existing_report.update(report_dict)
654
+ report_dict = existing_report
655
+ with open(json_report_path, "w") as f:
656
+ json.dump(report_dict, f, indent=4)
657
+
658
+ message_list.append("Summary:")
659
+ message_list += summaries
660
+ message_list.append(f"{'-'*40}TorchTidy Report{'-'*40}")
661
+ if print_enable:
662
+ print("\n".join(message_list))
venv/lib/python3.10/site-packages/torch/profiler/_utils.py ADDED
@@ -0,0 +1,373 @@
1
+ import functools
2
+ import re
3
+ from collections import deque
4
+ from dataclasses import dataclass
5
+ from typing import Dict, List
6
+
7
+ from torch.autograd import _KinetoEvent
8
+ from torch.autograd.profiler import profile
9
+
10
+ from torch.profiler import DeviceType
11
+
12
+
13
+ def _traverse(tree, next_fn, children_fn=lambda x: x.children, reverse: bool = False):
14
+ order = reversed if reverse else lambda x: x
15
+ remaining = deque(order(tree))
16
+ while remaining:
17
+ curr_event = next_fn(remaining)
18
+ yield curr_event
19
+ for child_event in order(children_fn(curr_event)):
20
+ remaining.append(child_event)
21
+
22
+
23
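+ # pop() from the right (with reversed insertion) yields a pre-order DFS;
+ # popleft() without reversal yields a BFS over the event tree.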
+ traverse_dfs = functools.partial(_traverse, next_fn=lambda x: x.pop(), reverse=True)
24
+ traverse_bfs = functools.partial(
25
+ _traverse, next_fn=lambda x: x.popleft(), reverse=False
26
+ )
27
+
28
+
29
+ @dataclass
30
+ class EventMetrics:
31
+ duration_time_ns: int = 0
32
+ self_time_ns: int = 0
33
+ idle_time_ns: int = 0
34
+ queue_depth: int = 0
35
+
36
+ @property
37
+ def fraction_idle_time(self):
38
+ if self.duration_time_ns == 0:
39
+ return 0.0
40
+ return self.idle_time_ns / self.duration_time_ns
41
+
42
+
43
+ @dataclass
44
+ class Interval:
45
+ start: int
46
+ end: int
47
+ queue_depth: int = 0
48
+
49
+
50
+ class EventKey:
51
+ def __init__(self, event):
52
+ self.event = event
53
+
54
+ def __hash__(self):
55
+ return hash(self.event.id)
56
+
57
+ def __eq__(self, other):
58
+ return self.event.id == other.event.id
59
+
60
+ def __repr__(self):
61
+ return f"{self.event.name}"
62
+
63
+ def intervals_overlap(self, intervals: List[Interval]):
64
+ overlap_time = 0
65
+ intervals = sorted(intervals, key=lambda x: x.start)
66
+
67
+ if intervals:
68
+ overlap_start = max(self.event.start_time_ns, intervals[0].start)
69
+ overlap_end = min(self.event.end_time_ns, intervals[0].end)
70
+
71
+ if overlap_start < overlap_end:
72
+ overlap_time += overlap_end - overlap_start
73
+
74
+ i, j = 0, 1
75
+ while j < len(intervals):
76
+ prev_interval = intervals[i]
77
+ curr_interval = intervals[j]
78
+ j += 1
79
+ if prev_interval.end > curr_interval.start:
80
+ # Completely subsumed by previous interval
81
+ if prev_interval.end > curr_interval.end:
82
+ j += 1
83
+ continue
84
+ else:
85
+ curr_interval.start = prev_interval.end
86
+ i = j
87
+
88
+ overlap_start = max(self.event.start_time_ns, curr_interval.start)
89
+ overlap_end = min(self.event.end_time_ns, curr_interval.end)
90
+ if overlap_start < overlap_end:
91
+ overlap_time += overlap_end - overlap_start
92
+
93
+ return overlap_time
94
+
95
+
96
+ class BasicEvaluation:
97
+ def __init__(self, prof: profile):
98
+ self.profile = prof
99
+ self.metrics: Dict[EventKey, EventMetrics] = {}
100
+ self.compute_self_time()
101
+ self.event_keys = sorted(
102
+ (e for e in self.metrics.keys()), key=lambda x: x.event.start_time_ns
103
+ )
104
+ self.events = [e.event for e in self.event_keys]
105
+ self.cuda_events: List[_KinetoEvent] = []
106
+ self.queue_depth_list = self.compute_queue_depth()
107
+ self.compute_idle_time()
108
+
109
+ def compute_self_time(self):
110
+ """
111
+ Computes the event's self time (total time minus time spent in child ops).
112
+ """
113
+ assert self.profile.kineto_results is not None
114
+ stack = deque(self.profile.kineto_results.experimental_event_tree())
115
+
116
+ # standard iterating dfs
117
+ while stack:
118
+ curr_event = stack.pop()
119
+ self_time = curr_event.duration_time_ns
120
+ for child_event in curr_event.children:
121
+ self_time -= child_event.duration_time_ns
122
+ stack.append(child_event)
123
+ assert (
124
+ EventKey(curr_event) not in self.metrics
125
+ ), f"Duplicate id: {curr_event.id}, {curr_event.name}"
126
+ self.metrics[EventKey(curr_event)] = EventMetrics(self_time_ns=self_time)
127
+ self.metrics[
128
+ EventKey(curr_event)
129
+ ].duration_time_ns = curr_event.duration_time_ns
130
+
131
+ def compute_queue_depth(self):
132
+ """
133
+ Computes queue_depth at each event. This will calculate the queue depth data for
134
+ all the events in the tree.
135
+ It returns a list of Interval objects containing the queue depth data for CUDA launches and kernels.
136
+ """
137
+ assert self.profile.kineto_results is not None
138
+ cuda_event_list = self.profile.kineto_results.events()
139
+
140
+ def is_cuda_launch_kernel(e):
141
+ # TODO: find a better way to identify cudaLaunchKernel
142
+ return e.name == "cudaLaunchKernel"
143
+
144
+ def is_cuda_kernel(e):
145
+ # TODO: find a better way to identify CUDA Kernel
146
+ return e.device_type() == DeviceType.CUDA and "mem" not in e.name.lower()
147
+
148
+ cuda_launch_events = sorted(
149
+ (e for e in cuda_event_list if is_cuda_launch_kernel(e)),
150
+ key=lambda x: x.start_us(),
151
+ )
152
+ cuda_kernel_events = sorted(
153
+ (e for e in cuda_event_list if is_cuda_kernel(e)),
154
+ key=lambda x: x.start_us(),
155
+ )
156
+
157
+ self.cuda_events = sorted(
158
+ cuda_launch_events + cuda_kernel_events, key=lambda x: x.start_us()
159
+ )
160
+
161
+ kernel_mapping: Dict[_KinetoEvent, int] = {}
162
+ last_mapped_kernel = 0
163
+ for cuda_launch_event in cuda_launch_events:
164
+ index = index_of_first_match(
165
+ cuda_kernel_events,
166
+ lambda x: x.linked_correlation_id()
167
+ == cuda_launch_event.linked_correlation_id(),
168
+ start=last_mapped_kernel,
169
+ )
170
+ kernel_mapping[cuda_launch_event] = index
171
+ last_mapped_kernel = index if index is not None else last_mapped_kernel
172
+
173
+ current_kernel_index = 0
174
+ spawned_kernel_index = -1
175
+
176
+ all_events = cuda_launch_events + cuda_kernel_events + self.events
177
+
178
+ def new_old_event_comparator(event):
179
+ if hasattr(event, "start_us"):
180
+ return event.start_us() * 1000
181
+ if hasattr(event, "start_time_ns"):
182
+ return event.start_time_ns
183
+ raise Exception("Unknown Event Type")
184
+
185
+ queue_depth_list: List[Interval] = []
186
+ all_events.sort(key=new_old_event_comparator)
187
+ for event in all_events:
188
+ # Find latest cuda kernel event
189
+ if hasattr(event, "start_us"):
190
+ start_time = event.start_us() * 1000
191
+ end_time = (event.start_us() + event.duration_us()) * 1000
192
+ # Find current spawned cuda kernel event
193
+ if event in kernel_mapping and kernel_mapping[event] is not None:
194
+ spawned_kernel_index = kernel_mapping[event]
195
+ elif hasattr(event, "start_time_ns"):
196
+ start_time = event.start_time_ns # type: ignore[attr-defined]
197
+ end_time = event.end_time_ns # type: ignore[attr-defined]
198
+
199
+ while (
200
+ current_kernel_index < len(cuda_kernel_events)
201
+ and (cuda_kernel_events[current_kernel_index].start_us()) * 1000
202
+ <= start_time # type: ignore[possibly-undefined]
203
+ ):
204
+ current_kernel_index += 1
205
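+ # Queue depth approximates the number of kernels that have been launched
+ # but have not yet started executing on the GPU at this point in time.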
+ current_queue_depth = spawned_kernel_index - current_kernel_index + 1
206
+ current_queue_depth = max(current_queue_depth, 0)
207
+
208
+ if hasattr(event, "start_us"):
209
+ queue_depth_list.append(
210
+ Interval(start_time, end_time, current_queue_depth) # type: ignore[possibly-undefined]
211
+ )
212
+ elif hasattr(event, "start_time_ns"):
213
+ self.metrics[EventKey(event)].queue_depth = current_queue_depth
214
+
215
+ return queue_depth_list
216
+
217
+ def compute_idle_time(self):
218
+ """
219
+ Computes idle time of the profile.
220
+ """
221
+ # Based on queue_depth_list, we can calculate idle time for all the events
222
+ idle = False
223
+ idle_start = 0
224
+ idle_intervals: List[Interval] = []
225
+ if self.queue_depth_list and self.events:
226
+ idle_intervals += [
227
+ Interval(self.events[0].start_time_ns, self.queue_depth_list[0].start),
228
+ Interval(self.queue_depth_list[-1].end, self.events[-1].end_time_ns),
229
+ ]
230
+
231
+ for data_point in self.queue_depth_list:
232
+ if data_point.queue_depth == 0 and not idle:
233
+ idle_start = data_point.end
234
+ idle = True
235
+ if data_point.queue_depth > 0 and idle:
236
+ idle_intervals.append(Interval(idle_start, data_point.start))
237
+ idle = False
238
+
239
+ event_list = [e.event for e in self.metrics.keys()]
240
+ for event in event_list:
241
+ self.metrics[EventKey(event)].idle_time_ns = EventKey(
242
+ event
243
+ ).intervals_overlap(idle_intervals)
244
+
245
+ def rank_events(self, length):
246
+ """
247
+ Filter and Rank the events based on some heuristics:
248
+ 1) Events that are in the falling phase of the queue depth.
249
+ 2) Events that have a high idle_time, self_time difference.
250
+
251
+ Parameters:
252
+ length: The number of events to return.
253
+ """
254
+
255
+ # Find the interval when qd is falling to 0
256
+ import torch
257
+
258
+ queue_depth_list = list(reversed(self.queue_depth_list))
259
+ qd_values = [e.queue_depth for e in queue_depth_list]
260
+
261
+ bottom_threshold = 0
262
+ top_threshold = 4
263
+ decrease_interval = []
264
+ i = 0
265
+ while i < len(qd_values):
266
+ if qd_values[i] > bottom_threshold:
267
+ i += 1
268
+ continue
269
+ for j in range(i + 1, len(qd_values)):
270
+ # Find next zero and if the max value between them exceeds
271
+ # the threshold, then we have a falling interval
272
+ next_minimum_idx = index_of_first_match(
273
+ qd_values, lambda x: x <= bottom_threshold, start=j
274
+ )
275
+ peak_idx = argmax(qd_values, start=j, end=next_minimum_idx)
276
+
277
+ # if is a valid peak, we add to list and continue
278
+ if peak_idx is not None and qd_values[peak_idx] >= top_threshold:
279
+ decrease_interval.append(
280
+ Interval(
281
+ queue_depth_list[peak_idx].start, queue_depth_list[i].start
282
+ )
283
+ )
284
+ i = next_minimum_idx if next_minimum_idx is not None else i
285
+ break
286
+ i += 1
287
+ # Filter out events that are not in the decrease interval
288
+ event_list = [
289
+ event
290
+ for event in self.metrics.keys()
291
+ if event.intervals_overlap(decrease_interval)
292
+ ]
293
+ if event_list:
294
+ self_time = torch.tensor(
295
+ [self.metrics[event].self_time_ns for event in event_list],
296
+ dtype=torch.float32,
297
+ )
298
+ idle_time = torch.tensor(
299
+ [self.metrics[event].fraction_idle_time for event in event_list],
300
+ dtype=torch.float32,
301
+ )
302
+ normalized_gain = (idle_time - torch.mean(idle_time)) / torch.std(idle_time)
303
+ normalized_self = (self_time - torch.mean(self_time)) / torch.std(self_time)
304
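+ # Heuristic score: weighted sum of the normalized idle-time fraction and the
+ # normalized self time (the 0.6 weight is an empirically chosen constant).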
+ heuristic_score_list = normalized_gain + 0.6 * normalized_self
305
+
306
+ # Sort events by heuristic
307
+ event_list = [
308
+ event
309
+ for _, event in sorted(
310
+ zip(heuristic_score_list, event_list),
311
+ key=lambda x: x[0],
312
+ reverse=True,
313
+ )
314
+ ]
315
+ event_list = event_list[:length]
316
+ return event_list
317
+
318
+ def get_optimizable_events(self, length: int = 1, print_enable: bool = True):
319
+ event_list = self.rank_events(length)
320
+ if not print_enable:
321
+ return event_list
322
+ output = "Optimizable events:\n" if event_list else "No events to optimize\n"
323
+
324
+ output += "\n".join(
325
+ [
326
+ f"""{'-'*80}
327
+ Event: {event}
328
+ Source code location: {source_code_location(event.event)}
329
+ Percentage idle time: {self.metrics[event].fraction_idle_time * 100:.2f}%
330
+ {'-'*80}"""
331
+ for event in event_list
332
+ ]
333
+ )
334
+ if print_enable:
335
+ print(output)
336
+ return event_list
337
+
338
+
339
+ def index_of_first_match(seq, predicate, start=0, end=None):
340
+ if end is None or end >= len(seq):
341
+ end = len(seq)
342
+ for i in range(start, end):
343
+ if predicate(seq[i]):
344
+ return i
345
+ return None
346
+
347
+
348
+ def argmax(seq, key=lambda x: x, start=0, end=None):
349
+ seq = seq[start:end]
350
+ if len(seq) == 0:
351
+ return None
352
+ return seq.index(max(seq, key=key)) + start
353
+
354
+
355
+ def source_code_location(event):
356
+ while event is not None:
357
+ match = re.search(r"\.py\(.*\)", event.name)
358
+ if match is None:
359
+ event = event.parent
360
+ continue
361
+ return event.name
362
+ return "No source code location found"
363
+
364
+
365
+ # Provide an OSS workaround for cudagraphs + CUPTI issue
366
+ # https://github.com/pytorch/pytorch/issues/75504
367
+ # TODO(dberard) - deprecate / remove workaround for CUDA >= 12, when
368
+ # we stop supporting older CUDA versions.
369
+ def _init_for_cuda_graphs():
370
+ from torch.autograd.profiler import profile
371
+
372
+ with profile():
373
+ pass
venv/lib/python3.10/site-packages/torch/profiler/itt.py ADDED
@@ -0,0 +1,78 @@
1
+ from contextlib import contextmanager
2
+
3
+ try:
4
+ from torch._C import _itt
5
+ except ImportError:
6
+
7
+ class _ITTStub:
8
+ @staticmethod
9
+ def _fail(*args, **kwargs):
10
+ raise RuntimeError(
11
+ "ITT functions not installed. Are you sure you have an ITT build?"
12
+ )
13
+
14
+ @staticmethod
15
+ def is_available():
16
+ return False
17
+
18
+ rangePush = _fail
19
+ rangePop = _fail
20
+ mark = _fail
21
+
22
+ _itt = _ITTStub() # type: ignore[assignment]
23
+
24
+
25
+ __all__ = ["is_available", "range_push", "range_pop", "mark", "range"]
26
+
27
+
28
+ def is_available():
29
+ """
30
+ Check if ITT feature is available or not
31
+ """
32
+ return _itt.is_available()
33
+
34
+
35
+ def range_push(msg):
36
+ """
37
+ Pushes a range onto a stack of nested range spans. Returns the zero-based
38
+ depth of the range that is started.
39
+
40
+ Arguments:
41
+ msg (str): ASCII message to associate with range
42
+ """
43
+ return _itt.rangePush(msg)
44
+
45
+
46
+ def range_pop():
47
+ """
48
+ Pops a range off of a stack of nested range spans. Returns the
49
+ zero-based depth of the range that is ended.
50
+ """
51
+ return _itt.rangePop()
52
+
53
+
54
+ def mark(msg):
55
+ """
56
+ Describe an instantaneous event that occurred at some point.
57
+
58
+ Arguments:
59
+ msg (str): ASCII message to associate with the event.
60
+ """
61
+ return _itt.mark(msg)
62
+
63
+
64
+ @contextmanager
65
+ def range(msg, *args, **kwargs):
66
+ """
67
+ Context manager / decorator that pushes an ITT range at the beginning
68
+ of its scope, and pops it at the end. If extra arguments are given,
69
+ they are passed as arguments to msg.format().
70
+
71
+ Args:
72
+ msg (str): message to associate with the range
73
+ """
74
+ range_push(msg.format(*args, **kwargs))
75
+ try:
76
+ yield
77
+ finally:
78
+ range_pop()
venv/lib/python3.10/site-packages/torch/profiler/profiler.py ADDED
@@ -0,0 +1,839 @@
1
+ import gzip
2
+ import json
3
+ import os
4
+ import tempfile
5
+ from abc import ABC, abstractmethod
6
+ from enum import Enum
7
+ from functools import partial
8
+ from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
9
+ from warnings import warn
10
+
11
+ from typing_extensions import Self
12
+
13
+ import torch
14
+ import torch.autograd.profiler as prof
15
+ from torch._C import _get_privateuse1_backend_name
16
+ from torch._C._profiler import (
17
+ _add_execution_trace_observer,
18
+ _disable_execution_trace_observer,
19
+ _enable_execution_trace_observer,
20
+ _ExperimentalConfig,
21
+ _remove_execution_trace_observer,
22
+ )
23
+ from torch.autograd import kineto_available, ProfilerActivity
24
+ from torch.profiler._memory_profiler import MemoryProfile, MemoryProfileTimeline
25
+
26
+
27
+ __all__ = [
28
+ "supported_activities",
29
+ "ProfilerAction",
30
+ "schedule",
31
+ "tensorboard_trace_handler",
32
+ "profile",
33
+ "ExecutionTraceObserver",
34
+ ]
35
+ PROFILER_STEP_NAME = "ProfilerStep"
36
+
37
+
38
+ def supported_activities():
39
+ """
40
+ Returns a set of supported profiler tracing activities.
41
+
42
+ Note: profiler uses CUPTI library to trace on-device CUDA kernels.
43
+ In case when CUDA is enabled but CUPTI is not available, passing
44
+ ``ProfilerActivity.CUDA`` to profiler results in using the legacy CUDA
45
+ profiling code (same as in the legacy ``torch.autograd.profiler``).
46
+ This, in turn, results in including CUDA time in the profiler table output,
47
+ but not in the JSON trace.
48
+ """
49
+ return torch.autograd._supported_activities()
50
+
51
+
52
+ class _ITraceObserver(ABC):
53
+ """Abstract interface for a Trace observer.
54
+ This defines 3 methods: start, stop and cleanup"""
55
+
56
+ @abstractmethod
57
+ def start(self):
58
+ pass
59
+
60
+ @abstractmethod
61
+ def stop(self):
62
+ pass
63
+
64
+ @abstractmethod
65
+ def cleanup(self):
66
+ pass
67
+
68
+
69
+ class _KinetoProfile:
70
+ """Low-level profiler that wraps the autograd profiler.
71
+
72
+ Args:
73
+ activities (iterable): list of activity groups (CPU, CUDA) to use in profiling, supported values:
74
+ ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``.
75
+ Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA.
76
+ record_shapes (bool): save information about operator's input shapes.
77
+ profile_memory (bool): track tensor memory allocation/deallocation (see ``export_memory_timeline``
78
+ for more details).
79
+ with_stack (bool): record source information (file and line number) for the ops.
80
+ with_flops (bool): use formula to estimate the FLOPS of specific operators
81
+ (matrix multiplication and 2D convolution).
82
+ with_modules (bool): record module hierarchy (including function names)
83
+ corresponding to the callstack of the op. e.g. If module A's forward calls
84
+ module B's forward, which contains an aten::add op,
85
+ then aten::add's module hierarchy is A.B
86
+ Note that this support exists, at the moment, only for TorchScript models
87
+ and not eager mode models.
88
+ experimental_config (_ExperimentalConfig) : A set of experimental options
89
+ used by profiler libraries like Kineto. Note, backward compatibility is not guaranteed.
90
+ execution_trace_observer (ExecutionTraceObserver) : A PyTorch Execution Trace Observer object.
91
+ `PyTorch Execution Traces <https://arxiv.org/pdf/2305.14516.pdf>`__ offer a graph based
92
+ representation of AI/ML workloads and enable replay benchmarks, simulators, and emulators.
93
+ When this argument is included the observer start() and stop() will be called for the
94
+ same time window as PyTorch profiler.
95
+
96
+ .. note::
97
+ This API is experimental and subject to change in the future.
98
+
99
+ Enabling shape and stack tracing results in additional overhead.
100
+ When record_shapes=True is specified, the profiler will temporarily hold references to the tensors;
101
+ that may further prevent certain optimizations that depend on the reference count and introduce
102
+ extra tensor copies.
103
+ """
104
+
105
+ def __init__(
106
+ self,
107
+ *,
108
+ activities: Optional[Iterable[ProfilerActivity]] = None,
109
+ record_shapes: bool = False,
110
+ profile_memory: bool = False,
111
+ with_stack: bool = False,
112
+ with_flops: bool = False,
113
+ with_modules: bool = False,
114
+ experimental_config: Optional[_ExperimentalConfig] = None,
115
+ execution_trace_observer: Optional[_ITraceObserver] = None,
116
+ ):
117
+ self.activities = set(activities) if activities else supported_activities()
118
+ self.record_shapes = record_shapes
119
+ self.with_flops = with_flops
120
+ self.profile_memory = profile_memory
121
+ self.with_stack = with_stack
122
+ self.with_modules = with_modules
123
+ self.experimental_config = experimental_config
124
+ self.execution_trace_observer = execution_trace_observer
125
+ self.profiler: Optional[prof.profile] = None
126
+ self.mem_tl: Optional[MemoryProfileTimeline] = None
127
+ self.use_device = None
128
+ privateuse1_backend = _get_privateuse1_backend_name()
129
+ if privateuse1_backend != "privateuseone":
130
+ self.use_device = privateuse1_backend
131
+ # user-defined metadata to be amended to the trace
132
+ self.preset_metadata: Dict[str, str] = dict()
133
+
134
+ def start(self):
135
+ self.prepare_trace()
136
+ self.start_trace()
137
+
138
+ def stop(self):
139
+ self.stop_trace()
140
+
141
+ def prepare_trace(self):
142
+ self.profiler = prof.profile(
143
+ use_cuda=(ProfilerActivity.CUDA in self.activities),
144
+ use_cpu=(ProfilerActivity.CPU in self.activities),
145
+ use_mtia=(ProfilerActivity.MTIA in self.activities),
146
+ use_device=None,
147
+ record_shapes=self.record_shapes,
148
+ with_flops=self.with_flops,
149
+ profile_memory=self.profile_memory,
150
+ with_stack=self.with_stack,
151
+ with_modules=self.with_modules,
152
+ use_kineto=True,
153
+ experimental_config=self.experimental_config,
154
+ )
155
+ self.profiler._prepare_trace()
156
+
157
+ def start_trace(self):
158
+ if self.execution_trace_observer:
159
+ self.execution_trace_observer.start()
160
+ assert self.profiler is not None
161
+ self.profiler._start_trace()
162
+
163
+ if self.profile_memory:
164
+ self.add_metadata_json("profile_memory", "1")
165
+ if self.with_stack:
166
+ self.add_metadata_json("with_stack", "1")
167
+ if self.record_shapes:
168
+ self.add_metadata_json("record_shapes", "1")
169
+ if self.with_modules:
170
+ self.add_metadata_json("with_modules", "1")
171
+ if self.with_flops:
172
+ self.add_metadata_json("with_flops", "1")
173
+
174
+ if kineto_available():
175
+ dist_info = self._get_distributed_info()
176
+ if dist_info:
177
+ self.add_metadata_json("distributedInfo", json.dumps(dist_info))
178
+
179
+ if hasattr(torch, "_inductor"):
180
+ import torch._inductor.config as inductor_config
181
+
182
+ if inductor_config.triton.cudagraphs:
183
+ os.environ["DISABLE_CUPTI_LAZY_REINIT"] = "1"
184
+ self.add_metadata_json("DISABLE_CUPTI_LAZY_REINIT", "1")
185
+ # FIXME: CUDA Graph does not work well with CUPTI teardown.
186
+ # 1) crashes on 1st lazy CUPTI re-init after teardown (CUDA 11)
187
+ # 2) crashes on 2nd non-lazy CUPTI re-init after teardown (CUDA 12)
188
+ # Workaround: turn off CUPTI teardown when using CUDA Graphs.
189
+ os.environ["TEARDOWN_CUPTI"] = "0"
190
+
191
+ # Insert the preset user metadata to the trace
192
+ for k, v in self.preset_metadata.items():
193
+ self.add_metadata_json(k, v)
194
+
195
+ def stop_trace(self):
196
+ if self.execution_trace_observer:
197
+ self.execution_trace_observer.stop()
198
+ assert self.profiler is not None
199
+ self.profiler.__exit__(None, None, None)
200
+
201
+ def export_chrome_trace(self, path: str):
202
+ """
203
+ Exports the collected trace in Chrome JSON format.
204
+ """
205
+ assert self.profiler
206
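+ # For ".gz" paths, export to a temporary .json file first, then gzip it into place.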
+ if path.endswith(".gz"):
207
+ fp = tempfile.NamedTemporaryFile("w+t", suffix=".json", delete=False)
208
+ fp.close()
209
+ retvalue = self.profiler.export_chrome_trace(fp.name)
210
+ with open(fp.name) as fin:
211
+ with gzip.open(path, "wt") as fout:
212
+ fout.writelines(fin)
213
+ os.remove(fp.name)
214
+ return retvalue
215
+ else:
216
+ return self.profiler.export_chrome_trace(path)
217
+
218
+ def export_stacks(self, path: str, metric: str = "self_cpu_time_total"):
219
+ """Save stack traces in a file in a format suitable for visualization.
220
+
221
+ Args:
222
+ path (str): save stacks file to this location;
223
+ metric (str): metric to use: "self_cpu_time_total" or "self_cuda_time_total"
224
+
225
+ .. note::
226
+ Example of using FlameGraph tool:
227
+
228
+ - git clone https://github.com/brendangregg/FlameGraph
229
+ - cd FlameGraph
230
+ - ./flamegraph.pl --title "CPU time" --countname "us." profiler.stacks > perf_viz.svg
231
+ """
232
+ assert self.profiler
233
+ return self.profiler.export_stacks(path, metric)
234
+
235
+ def key_averages(
236
+ self, group_by_input_shape: bool = False, group_by_stack_n: int = 0
237
+ ):
238
+ """Averages events, grouping them by operator name and (optionally) input shapes and
239
+ stack.
240
+
241
+ .. note::
242
+ To use shape/stack functionality make sure to set record_shapes/with_stack
243
+ when creating profiler context manager.
244
+ """
245
+ assert self.profiler
246
+ return self.profiler.key_averages(group_by_input_shape, group_by_stack_n)
247
+
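A small sketch of key_averages with shape grouping; grouping by input shape only yields distinct rows when record_shapes=True was set (the workload is a placeholder):

# Sketch: group averaged events by operator name and input shapes.
import torch
from torch.profiler import profile, ProfilerActivity

with profile(activities=[ProfilerActivity.CPU], record_shapes=True) as p:
    torch.mm(torch.randn(16, 16), torch.randn(16, 16))  # placeholder workload
print(p.key_averages(group_by_input_shape=True).table(sort_by="cpu_time_total"))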
248
+ def events(self):
249
+ """
250
+ Returns the list of unaggregated profiler events,
251
+ to be used in the trace callback or after the profiling is finished
252
+ """
253
+ assert self.profiler
254
+ return self.profiler.function_events
255
+
256
+ def add_metadata(self, key: str, value: str):
257
+ """
258
+ Adds user-defined metadata with a string key and a string value
259
+ into the trace file
260
+ """
261
+ wrapped_value = '"' + value.replace('"', '\\"') + '"'
262
+ torch.autograd._add_metadata_json(key, wrapped_value)
263
+
264
+ def add_metadata_json(self, key: str, value: str):
265
+ """
266
+ Adds user-defined metadata with a string key and a valid JSON value
267
+ into the trace file
268
+ """
269
+ torch.autograd._add_metadata_json(key, value)
270
+
271
+ def preset_metadata_json(self, key: str, value: str):
272
+ """
273
+ Presets user-defined metadata while the profiler is not yet started; the
274
+ metadata is added into the trace file later, when the trace starts.
275
+ Metadata is in the format of a string key and a valid JSON value
276
+ """
277
+ self.preset_metadata[key] = value
278
+
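A sketch of the three metadata entry points above; the keys and values are placeholders. preset_metadata_json can be called before the trace starts, while add_metadata/add_metadata_json need an active trace:

import torch
from torch.profiler import profile, ProfilerActivity

p = profile(activities=[ProfilerActivity.CPU])
p.preset_metadata_json("config", '{"batch_size": 32}')  # queued, inserted at start_trace()
p.start()
torch.mm(torch.randn(4, 4), torch.randn(4, 4))  # placeholder workload
p.add_metadata("run_name", "baseline")   # stored as the JSON string "baseline"
p.add_metadata_json("world_size", "8")   # stored as the JSON number 8
p.stop()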
279
+ def _get_distributed_info(self):
280
+ import torch.distributed as dist
281
+
282
+ if not dist.is_available() or not dist.is_initialized():
283
+ return None
284
+
285
+ backend = dist.get_backend()
286
+ dist_info = {
287
+ "backend": backend,
288
+ "rank": dist.get_rank(),
289
+ "world_size": dist.get_world_size(),
290
+ "pg_count": dist.get_pg_count(),
291
+ "pg_config": dist.distributed_c10d._get_all_pg_configs(),
292
+ }
293
+ if backend == "nccl":
294
+ nccl_version = torch.cuda.nccl.version()
295
+ dist_info["nccl_version"] = ".".join(str(v) for v in nccl_version)
296
+ return dist_info
297
+
298
+ def _memory_profile(self) -> MemoryProfile:
299
+ required = ("record_shapes", "profile_memory", "with_stack")
300
+ missing = [f"{i}=True" for i in required if not getattr(self, i)]
301
+ if missing:
302
+ raise ValueError(f"{', '.join(missing)} required for memory profiling.")
303
+
304
+ assert self.profiler is not None and self.profiler.kineto_results is not None
305
+ return MemoryProfile(self.profiler.kineto_results)
306
+
307
+ def export_memory_timeline(self, path: str, device: Optional[str] = None) -> None:
308
+ """Export memory event information from the tree collected by the profiler
309
+ for a given device, and export a timeline plot. There are three
310
+ exportable file formats using ``export_memory_timeline``, each controlled by the
311
+ ``path``'s suffix.
312
+
313
+ - For an HTML compatible plot, use the suffix ``.html``, and a memory timeline
314
+ plot will be embedded as a PNG file in the HTML file.
315
+
316
+ - For plot points consisting of ``[times, [sizes by category]]``, where
317
+ ``times`` are timestamps and ``sizes`` are memory usage for each category,
318
+ the memory timeline plot will be saved as JSON (``.json``) or gzipped JSON
319
+ (``.json.gz``), depending on the suffix.
320
+
321
+ - For raw memory points, use the suffix ``.raw.json.gz``. Each raw memory
322
+ event will consist of ``(timestamp, action, numbytes, category)``, where
323
+ ``action`` is one of ``[PREEXISTING, CREATE, INCREMENT_VERSION, DESTROY]``,
324
+ and ``category`` is one of the enums from
325
+ ``torch.profiler._memory_profiler.Category``.
326
+
327
+ Output: Memory timeline written as gzipped JSON, JSON, or HTML.
328
+ """
329
+ # Default to device 0, if unset. Fallback on cpu.
330
+ if device is None and self.use_device and self.use_device != "cuda":
331
+ device = self.use_device + ":0"
332
+
333
+ if device is None:
334
+ device = "cuda:0" if torch.cuda.is_available() else "cpu"
335
+
336
+ # Construct the memory timeline plot data
337
+ self.mem_tl = MemoryProfileTimeline(self._memory_profile())
338
+
339
+ # Depending on the file suffix, save the data as json.gz or json.
340
+ # For html, we can embed the image into an HTML file.
341
+ if path.endswith(".html"):
342
+ self.mem_tl.export_memory_timeline_html(path, device)
343
+ elif path.endswith(".gz"):
344
+ fp = tempfile.NamedTemporaryFile("w+t", suffix=".json", delete=False)
345
+ fp.close()
346
+ if path.endswith("raw.json.gz"):
347
+ self.mem_tl.export_memory_timeline_raw(fp.name, device)
348
+ else:
349
+ self.mem_tl.export_memory_timeline(fp.name, device)
350
+ with open(fp.name) as fin:
351
+ with gzip.open(path, "wt") as fout:
352
+ fout.writelines(fin)
353
+ os.remove(fp.name)
354
+ else:
355
+ self.mem_tl.export_memory_timeline(path, device)
356
+
357
+
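A sketch of exporting the memory timeline; per _memory_profile above, record_shapes, profile_memory and with_stack must all be enabled, and the workload and output path are placeholders:

import torch
from torch.profiler import profile, ProfilerActivity

activities = [ProfilerActivity.CPU]
if torch.cuda.is_available():
    activities.append(ProfilerActivity.CUDA)

with profile(
    activities=activities,
    record_shapes=True,
    profile_memory=True,
    with_stack=True,
) as p:
    torch.mm(torch.randn(64, 64), torch.randn(64, 64))  # placeholder workload

p.export_memory_timeline("memory_timeline.html")  # or .json / .json.gz / .raw.json.gz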
358
+ class ProfilerAction(Enum):
359
+ """
360
+ Profiler actions that can be taken at the specified intervals
361
+ """
362
+
363
+ NONE = 0
364
+ WARMUP = 1
365
+ RECORD = 2
366
+ RECORD_AND_SAVE = 3
367
+
368
+
369
+ def schedule(
370
+ *, wait: int, warmup: int, active: int, repeat: int = 0, skip_first: int = 0
371
+ ) -> Callable:
372
+ """
373
+ Returns a callable that can be used as profiler ``schedule`` argument. The profiler will skip
374
+ the first ``skip_first`` steps, then wait for ``wait`` steps, then do the warmup for the next ``warmup`` steps,
375
+ then do the active recording for the next ``active`` steps and then repeat the cycle starting with ``wait`` steps.
376
+ The optional number of cycles is specified with the ``repeat`` parameter; a value of zero means that
377
+ the cycles will continue until the profiling is finished.
378
+ """
379
+
380
+ def schedule_fn(step: int) -> ProfilerAction:
381
+ assert step >= 0
382
+ if step < skip_first:
383
+ return ProfilerAction.NONE
384
+ else:
385
+ step -= skip_first
386
+ num_steps = wait + warmup + active
387
+ if repeat > 0 and step / num_steps >= repeat:
388
+ return ProfilerAction.NONE
389
+ mod_step = step % num_steps
390
+ if mod_step < wait:
391
+ return ProfilerAction.NONE
392
+ elif mod_step < wait + warmup:
393
+ return ProfilerAction.WARMUP
394
+ else:
395
+ return (
396
+ ProfilerAction.RECORD
397
+ if mod_step < num_steps - 1
398
+ else ProfilerAction.RECORD_AND_SAVE
399
+ )
400
+
401
+ assert (
402
+ wait >= 0 and warmup >= 0 and active > 0 and repeat >= 0 and skip_first >= 0
403
+ ), "Invalid profiler schedule arguments"
404
+ if warmup == 0:
405
+ warn("Profiler won't be using warmup, this can skew profiler results")
406
+ return schedule_fn
407
+
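A worked example of the cycle this schedule produces, using the wait=1, warmup=1, active=2, repeat=1 configuration that also appears in the profile example further down:

from torch.profiler import ProfilerAction, schedule

sched = schedule(wait=1, warmup=1, active=2, repeat=1)
# step 0 -> NONE (wait), step 1 -> WARMUP, step 2 -> RECORD,
# step 3 -> RECORD_AND_SAVE (trace handed to on_trace_ready),
# steps 4+ -> NONE, because repeat=1 limits profiling to a single cycle.
actions = [sched(step) for step in range(6)]
assert actions[3] == ProfilerAction.RECORD_AND_SAVE
assert actions[4] == ProfilerAction.NONE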
408
+
409
+ def _default_schedule_fn(_: int) -> ProfilerAction:
410
+ """
411
+ Default profiler behavior - immediately starts recording the events,
412
+ keeps doing it on every profiler step.
413
+ """
414
+ return ProfilerAction.RECORD
415
+
416
+
417
+ def tensorboard_trace_handler(
418
+ dir_name: str, worker_name: Optional[str] = None, use_gzip: bool = False
419
+ ):
420
+ """
421
+ Outputs tracing files to the directory ``dir_name``, which can then be
422
+ passed directly to tensorboard as its logdir.
423
+ ``worker_name`` should be unique for each worker in a distributed scenario;
424
+ it defaults to '[hostname]_[pid]'.
425
+ """
426
+ import os
427
+ import socket
428
+ import time
429
+
430
+ def handler_fn(prof) -> None:
431
+ nonlocal worker_name
432
+ if not os.path.isdir(dir_name):
433
+ try:
434
+ os.makedirs(dir_name, exist_ok=True)
435
+ except Exception as e:
436
+ raise RuntimeError("Can't create directory: " + dir_name) from e
437
+ if not worker_name:
438
+ worker_name = f"{socket.gethostname()}_{os.getpid()}"
439
+ # Use nanosecond here to avoid naming clash when exporting the trace
440
+ file_name = f"{worker_name}.{time.time_ns()}.pt.trace.json"
441
+ if use_gzip:
442
+ file_name = file_name + ".gz"
443
+ prof.export_chrome_trace(os.path.join(dir_name, file_name))
444
+
445
+ return handler_fn
446
+
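A sketch of wiring this handler into a profiling run; the log directory and workload are placeholders, and viewing the result requires the PyTorch Profiler TensorBoard plugin:

import torch
from torch.profiler import ProfilerActivity, profile, schedule, tensorboard_trace_handler

with profile(
    activities=[ProfilerActivity.CPU],
    schedule=schedule(wait=1, warmup=1, active=2, repeat=1),
    on_trace_ready=tensorboard_trace_handler("./log"),  # one trace file per RECORD_AND_SAVE step
) as p:
    for _ in range(6):
        torch.mm(torch.randn(32, 32), torch.randn(32, 32))  # placeholder workload
        p.step()
# Then: tensorboard --logdir ./log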
447
+
448
+ class profile(_KinetoProfile):
449
+ """Profiler context manager.
450
+
451
+ Args:
452
+ activities (iterable): list of activity groups (CPU, CUDA) to use in profiling, supported values:
453
+ ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``.
454
+ Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA.
455
+ schedule (Callable): callable that takes step (int) as a single parameter and returns
456
+ ``ProfilerAction`` value that specifies the profiler action to perform at each step.
457
+ on_trace_ready (Callable): callable that is called at each step when ``schedule``
458
+ returns ``ProfilerAction.RECORD_AND_SAVE`` during the profiling.
459
+ record_shapes (bool): save information about operator's input shapes.
460
+ profile_memory (bool): track tensor memory allocation/deallocation.
461
+ with_stack (bool): record source information (file and line number) for the ops.
462
+ with_flops (bool): use formula to estimate the FLOPs (floating point operations) of specific operators
463
+ (matrix multiplication and 2D convolution).
464
+ with_modules (bool): record module hierarchy (including function names)
465
+ corresponding to the callstack of the op. E.g. if module A's forward calls
466
+ module B's forward, which contains an aten::add op,
467
+ then aten::add's module hierarchy is A.B.
468
+ Note that this support exists, at the moment, only for TorchScript models
469
+ and not eager mode models.
470
+ experimental_config (_ExperimentalConfig) : A set of experimental options
471
+ used for Kineto library features. Note, backward compatibility is not guaranteed.
472
+ execution_trace_observer (ExecutionTraceObserver) : A PyTorch Execution Trace Observer object.
473
+ `PyTorch Execution Traces <https://arxiv.org/pdf/2305.14516.pdf>`__ offer a graph based
474
+ representation of AI/ML workloads and enable replay benchmarks, simulators, and emulators.
475
+ When this argument is included the observer start() and stop() will be called for the
476
+ same time window as PyTorch profiler. See the examples section below for a code sample.
477
+ use_cuda (bool):
478
+ .. deprecated:: 1.8.1
479
+ use ``activities`` instead.
480
+
481
+ .. note::
482
+ Use :func:`~torch.profiler.schedule` to generate the callable schedule.
483
+ Non-default schedules are useful when profiling long training jobs
484
+ and allow the user to obtain multiple traces at the different iterations
485
+ of the training process.
486
+ The default schedule simply records all the events continuously for the
487
+ duration of the context manager.
488
+
489
+ .. note::
490
+ Use :func:`~torch.profiler.tensorboard_trace_handler` to generate result files for TensorBoard:
491
+
492
+ ``on_trace_ready=torch.profiler.tensorboard_trace_handler(dir_name)``
493
+
494
+ After profiling, result files can be found in the specified directory. Use the command:
495
+
496
+ ``tensorboard --logdir dir_name``
497
+
498
+ to see the results in TensorBoard.
499
+ For more information, see
500
+ `PyTorch Profiler TensorBoard Plugin <https://github.com/pytorch/kineto/tree/master/tb_plugin>`__
501
+
502
+ .. note::
503
+ Enabling shape and stack tracing results in additional overhead.
504
+ When record_shapes=True is specified, the profiler will temporarily hold references to the tensors;
505
+ that may further prevent certain optimizations that depend on the reference count and introduce
506
+ extra tensor copies.
507
+
508
+
509
+ Examples:
510
+
511
+ .. code-block:: python
512
+
513
+ with torch.profiler.profile(
514
+ activities=[
515
+ torch.profiler.ProfilerActivity.CPU,
516
+ torch.profiler.ProfilerActivity.CUDA,
517
+ ]
518
+ ) as p:
519
+ code_to_profile()
520
+ print(p.key_averages().table(
521
+ sort_by="self_cuda_time_total", row_limit=-1))
522
+
523
+ Using the profiler's ``schedule``, ``on_trace_ready`` and ``step`` functions:
524
+
525
+ .. code-block:: python
526
+
527
+ # Non-default profiler schedule allows user to turn profiler on and off
528
+ # on different iterations of the training loop;
529
+ # trace_handler is called every time a new trace becomes available
530
+ def trace_handler(prof):
531
+ print(prof.key_averages().table(
532
+ sort_by="self_cuda_time_total", row_limit=-1))
533
+ # prof.export_chrome_trace("/tmp/test_trace_" + str(prof.step_num) + ".json")
534
+
535
+ with torch.profiler.profile(
536
+ activities=[
537
+ torch.profiler.ProfilerActivity.CPU,
538
+ torch.profiler.ProfilerActivity.CUDA,
539
+ ],
540
+
541
+ # In this example with wait=1, warmup=1, active=2, repeat=1,
542
+ # profiler will skip the first step/iteration,
543
+ # start warming up on the second, record
544
+ # the third and the fourth iterations,
545
+ # after which the trace will become available
546
+ # and on_trace_ready (when set) is called;
547
+ # the cycle repeats starting with the next step
548
+
549
+ schedule=torch.profiler.schedule(
550
+ wait=1,
551
+ warmup=1,
552
+ active=2,
553
+ repeat=1),
554
+ on_trace_ready=trace_handler
555
+ # on_trace_ready=torch.profiler.tensorboard_trace_handler('./log')
556
+ # used when outputting for tensorboard
557
+ ) as p:
558
+ for iter in range(N):
559
+ code_iteration_to_profile(iter)
560
+ # send a signal to the profiler that the next iteration has started
561
+ p.step()
562
+
563
+ The following sample shows how to set up an Execution Trace Observer (`execution_trace_observer`)
564
+
565
+ .. code-block:: python
566
+
567
+ with torch.profiler.profile(
568
+ ...
569
+ execution_trace_observer=(
570
+ ExecutionTraceObserver().register_callback("./execution_trace.json")
571
+ ),
572
+ ) as p:
573
+ for iter in range(N):
574
+ code_iteration_to_profile(iter)
575
+ p.step()
576
+
577
+ You can also refer to test_execution_trace_with_kineto() in tests/profiler/test_profiler.py.
578
+ Note: One can also pass any object satisfying the _ITraceObserver interface.
579
+ """
580
+
581
+ def __init__(
582
+ self,
583
+ *,
584
+ activities: Optional[Iterable[ProfilerActivity]] = None,
585
+ schedule: Optional[Callable[[int], ProfilerAction]] = None,
586
+ on_trace_ready: Optional[Callable[..., Any]] = None,
587
+ record_shapes: bool = False,
588
+ profile_memory: bool = False,
589
+ with_stack: bool = False,
590
+ with_flops: bool = False,
591
+ with_modules: bool = False,
592
+ experimental_config: Optional[_ExperimentalConfig] = None,
593
+ execution_trace_observer: Optional[_ITraceObserver] = None,
594
+ # deprecated:
595
+ use_cuda: Optional[bool] = None,
596
+ ):
597
+ activities_set = set(activities) if activities else supported_activities()
598
+ if use_cuda is not None:
599
+ warn("use_cuda is deprecated, use activities argument instead")
600
+ if use_cuda:
601
+ activities_set.add(ProfilerActivity.CUDA)
602
+ elif ProfilerActivity.CUDA in activities_set:
603
+ activities_set.remove(ProfilerActivity.CUDA)
604
+ assert len(activities_set) > 0, "No valid profiler activities found"
605
+
606
+ super().__init__(
607
+ activities=activities,
608
+ record_shapes=record_shapes,
609
+ profile_memory=profile_memory,
610
+ with_stack=with_stack,
611
+ with_flops=with_flops,
612
+ with_modules=with_modules,
613
+ experimental_config=experimental_config,
614
+ execution_trace_observer=execution_trace_observer,
615
+ )
616
+
617
+ if schedule:
618
+ self.schedule = schedule
619
+ # add step markers into the trace and table view
620
+ self.record_steps = True
621
+ else:
622
+ self.schedule = _default_schedule_fn
623
+ self.record_steps = False
624
+ self.on_trace_ready = on_trace_ready
625
+ self.step_num = 0
626
+ self.current_action = self.schedule(self.step_num)
627
+ self.step_rec_fn: Optional[prof.record_function] = None
628
+
629
+ self.action_map: Dict[
630
+ Tuple[ProfilerAction, Optional[ProfilerAction]], List[Any]
631
+ ] = {
632
+ # key is (prev_action, current_action), value is action list corresponding to the state pair.
633
+ (ProfilerAction.NONE, ProfilerAction.NONE): [],
634
+ (ProfilerAction.NONE, ProfilerAction.WARMUP): [self.prepare_trace],
635
+ (ProfilerAction.NONE, ProfilerAction.RECORD): [
636
+ self.prepare_trace,
637
+ self.start_trace,
638
+ ],
639
+ (ProfilerAction.NONE, ProfilerAction.RECORD_AND_SAVE): [
640
+ self.prepare_trace,
641
+ self.start_trace,
642
+ ],
643
+ (ProfilerAction.WARMUP, ProfilerAction.NONE): [
644
+ partial(warn, "Incorrect schedule: WARMUP followed by NONE"),
645
+ self.start_trace,
646
+ self.stop_trace,
647
+ ],
648
+ (ProfilerAction.WARMUP, ProfilerAction.WARMUP): [],
649
+ (ProfilerAction.WARMUP, ProfilerAction.RECORD): [self.start_trace],
650
+ (ProfilerAction.WARMUP, ProfilerAction.RECORD_AND_SAVE): [self.start_trace],
651
+ (ProfilerAction.RECORD, ProfilerAction.NONE): [
652
+ partial(warn, "Incorrect schedule: RECORD followed by NONE"),
653
+ self.stop_trace,
654
+ ],
655
+ (ProfilerAction.RECORD, ProfilerAction.WARMUP): [
656
+ partial(warn, "Incorrect schedule: RECORD followed by WARMUP"),
657
+ self.stop_trace,
658
+ ],
659
+ (ProfilerAction.RECORD, ProfilerAction.RECORD): [],
660
+ (ProfilerAction.RECORD, ProfilerAction.RECORD_AND_SAVE): [],
661
+ (ProfilerAction.RECORD_AND_SAVE, ProfilerAction.NONE): [
662
+ self.stop_trace,
663
+ self._trace_ready,
664
+ ],
665
+ (ProfilerAction.RECORD_AND_SAVE, ProfilerAction.WARMUP): [
666
+ self.stop_trace,
667
+ self._trace_ready,
668
+ self.prepare_trace,
669
+ ],
670
+ (ProfilerAction.RECORD_AND_SAVE, ProfilerAction.RECORD): [
671
+ self.stop_trace,
672
+ self._trace_ready,
673
+ self.prepare_trace,
674
+ self.start_trace,
675
+ ],
676
+ (ProfilerAction.RECORD_AND_SAVE, ProfilerAction.RECORD_AND_SAVE): [
677
+ self.stop_trace,
678
+ self._trace_ready,
679
+ self.prepare_trace,
680
+ self.start_trace,
681
+ ],
682
+ # used for exit action
683
+ (ProfilerAction.WARMUP, None): [self.start_trace, self.stop_trace],
684
+ (ProfilerAction.RECORD, None): [self.stop_trace, self._trace_ready],
685
+ (ProfilerAction.RECORD_AND_SAVE, None): [
686
+ self.stop_trace,
687
+ self._trace_ready,
688
+ ],
689
+ }
690
+ # Start tracking increments to profiler step, this will be used
691
+ # by Kineto
692
+ prof.KinetoStepTracker.init_step_count(PROFILER_STEP_NAME)
693
+
694
+ def __enter__(self):
695
+ self.start()
696
+ return self
697
+
698
+ def __exit__(self, exc_type, exc_val, exc_tb):
699
+ self.stop()
700
+ prof.KinetoStepTracker.erase_step_count(PROFILER_STEP_NAME)
701
+ if self.execution_trace_observer:
702
+ self.execution_trace_observer.cleanup()
703
+
704
+ def start(self):
705
+ self._transit_action(ProfilerAction.NONE, self.current_action)
706
+ if self.record_steps:
707
+ self.step_rec_fn = prof.record_function(
708
+ "ProfilerStep#" + str(self.step_num)
709
+ )
710
+ self.step_rec_fn.__enter__()
711
+
712
+ def stop(self):
713
+ if self.record_steps and self.step_rec_fn:
714
+ self.step_rec_fn.__exit__(None, None, None)
715
+ self._transit_action(self.current_action, None)
716
+
717
+ def step(self):
718
+ """
719
+ Signals the profiler that the next profiling step has started.
720
+ """
721
+ if self.record_steps and self.step_rec_fn:
722
+ self.step_rec_fn.__exit__(None, None, None)
723
+ prev_action = self.current_action
724
+ self.step_num += 1
725
+ self.current_action = self.schedule(self.step_num)
726
+
727
+ self._transit_action(prev_action, self.current_action)
728
+ prof.KinetoStepTracker.increment_step(PROFILER_STEP_NAME)
729
+
730
+ if self.record_steps:
731
+ self.step_rec_fn = prof.record_function(
732
+ "ProfilerStep#" + str(self.step_num)
733
+ )
734
+ self.step_rec_fn.__enter__()
735
+
736
+ def _trace_ready(self):
737
+ if self.on_trace_ready:
738
+ self.on_trace_ready(self)
739
+
740
+ def _transit_action(self, prev_action, current_action):
741
+ action_list = self.action_map.get((prev_action, current_action))
742
+ if action_list:
743
+ for action in action_list:
744
+ action()
745
+
746
+
747
+ class ExecutionTraceObserver(_ITraceObserver):
748
+ """Execution Trace Observer
749
+
750
+ Each process can have a single ExecutionTraceObserver instance. The observer
751
+ can be added to record function callbacks by calling register_callback()
752
+ explicitly. Without calling unregister_callback(), repeated calls to
753
+ register_callback() will not add additional observers to record function
754
+ callbacks. Once an ExecutionTraceObserver is created, the start() and stop()
755
+ methods control when the event data is recorded.
756
+
757
+ Deleting or calling unregister_callback() will remove the observer from the
758
+ record function callbacks, finalize the output file, and stop
760
+ incurring any overhead.
760
+ """
761
+
762
+ def __init__(self):
763
+ """
764
+ Initializes the default states.
765
+ """
766
+ self._registered = False
767
+ self._execution_trace_running = False
768
+
769
+ def __del__(self):
770
+ """
771
+ Calls unregister_callback() to make sure to finalize outputs.
772
+ """
773
+ self.unregister_callback()
774
+
775
+ def register_callback(self, output_file_path: str) -> Self:
776
+ """
777
+ Adds ET observer to record function callbacks. The data will be
778
+ written to output_file_path.
779
+ """
780
+ if not self._registered:
781
+ self._output_file_path = output_file_path
782
+ self._registered = _add_execution_trace_observer(output_file_path)
783
+ return self
784
+
785
+ def unregister_callback(self):
786
+ """
787
+ Removes ET observer from record function callbacks.
788
+ """
789
+ if self._registered:
790
+ self.stop()
791
+ _remove_execution_trace_observer()
792
+ self._registered = False
793
+
794
+ @property
795
+ def is_registered(self):
796
+ """
797
+ Returns True if the execution trace observer is registered, otherwise False.
798
+ """
799
+ return self._registered
800
+
801
+ def is_running(self):
802
+ """
803
+ Returns True if the observer is running, otherwise False.
804
+ """
805
+ return self._execution_trace_running
806
+
807
+ def start(self):
808
+ """
809
+ Starts capturing.
810
+ """
811
+ if self._registered and not self._execution_trace_running:
812
+ _enable_execution_trace_observer()
813
+ self._execution_trace_running = True
814
+
815
+ def stop(self):
816
+ """
817
+ Stops capturing.
818
+ """
819
+ if self._execution_trace_running:
820
+ _disable_execution_trace_observer()
821
+ self._execution_trace_running = False
822
+
823
+ def cleanup(self):
824
+ """
825
+ Calls unregister_callback() to make sure to finalize outputs.
826
+ """
827
+ self.unregister_callback()
828
+
829
+ def get_output_file_path(self) -> str:
830
+ """
831
+ Returns the output file name.
832
+ """
833
+ if self.is_registered:
834
+ return self._output_file_path
835
+ else:
836
+ raise RuntimeError(
837
+ "A callback to the ET profiler needs to be registered "
838
+ "first before getting the output file path"
839
+ )
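A sketch of using ExecutionTraceObserver on its own, outside of torch.profiler.profile(); the output path and workload are placeholders:

import torch
from torch.profiler import ExecutionTraceObserver

et = ExecutionTraceObserver()
et.register_callback("execution_trace.json")    # attach to record-function callbacks
et.start()                                      # begin recording events
torch.mm(torch.randn(8, 8), torch.randn(8, 8))  # placeholder workload
et.stop()                                       # pause recording
et.unregister_callback()                        # detach and finalize the output file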
venv/lib/python3.10/site-packages/torch/profiler/python_tracer.py ADDED
@@ -0,0 +1,20 @@
1
+ import os
2
+ import site
3
+ import sys
4
+ import typing
5
+
6
+ import torch
7
+
8
+
9
+ def _prefix_regex() -> typing.List[str]:
10
+ raw_paths = (
11
+ site.getsitepackages()
12
+ + sys.path
13
+ + [site.getuserbase()]
14
+ + [site.getusersitepackages()]
15
+ + [os.path.dirname(os.path.dirname(torch.__file__))]
16
+ )
17
+
18
+ path_prefixes = sorted({os.path.abspath(i) for i in raw_paths}, reverse=True)
19
+ assert all(isinstance(i, str) for i in path_prefixes)
20
+ return [i + os.sep for i in path_prefixes]
venv/lib/python3.10/site-packages/torch/quantization/__init__.py ADDED
@@ -0,0 +1,87 @@
1
+ from .quantize import * # noqa: F403
2
+ from .observer import * # noqa: F403
3
+ from .qconfig import * # noqa: F403
4
+ from .fake_quantize import * # noqa: F403
5
+ from .fuse_modules import fuse_modules
6
+ from .stubs import * # noqa: F403
7
+ from .quant_type import * # noqa: F403
8
+ from .quantize_jit import * # noqa: F403
9
+
10
+ # from .quantize_fx import *
11
+ from .quantization_mappings import * # noqa: F403
12
+ from .fuser_method_mappings import * # noqa: F403
13
+
14
+
15
+ def default_eval_fn(model, calib_data):
16
+ r"""
17
+ Default evaluation function takes a torch.utils.data.Dataset or a list of
18
+ input Tensors and run the model on the dataset
19
+ """
20
+ for data, target in calib_data:
21
+ model(data)
22
+
23
+
24
+ __all__ = [
25
+ "QuantWrapper",
26
+ "QuantStub",
27
+ "DeQuantStub",
28
+ # Top level API for eager mode quantization
29
+ "quantize",
30
+ "quantize_dynamic",
31
+ "quantize_qat",
32
+ "prepare",
33
+ "convert",
34
+ "prepare_qat",
35
+ # Top level API for graph mode quantization on TorchScript
36
+ "quantize_jit",
37
+ "quantize_dynamic_jit",
38
+ "_prepare_ondevice_dynamic_jit",
39
+ "_convert_ondevice_dynamic_jit",
40
+ "_quantize_ondevice_dynamic_jit",
41
+ # Top level API for graph mode quantization on GraphModule(torch.fx)
42
+ # 'fuse_fx', 'quantize_fx', # TODO: add quantize_dynamic_fx
43
+ # 'prepare_fx', 'prepare_dynamic_fx', 'convert_fx',
44
+ "QuantType", # quantization type
45
+ # custom module APIs
46
+ "get_default_static_quant_module_mappings",
47
+ "get_static_quant_module_class",
48
+ "get_default_dynamic_quant_module_mappings",
49
+ "get_default_qat_module_mappings",
50
+ "get_default_qconfig_propagation_list",
51
+ "get_default_compare_output_module_list",
52
+ "get_quantized_operator",
53
+ "get_fuser_method",
54
+ # Sub functions for `prepare` and `swap_module`
55
+ "propagate_qconfig_",
56
+ "add_quant_dequant",
57
+ "swap_module",
58
+ "default_eval_fn",
59
+ # Observers
60
+ "ObserverBase",
61
+ "WeightObserver",
62
+ "HistogramObserver",
63
+ "observer",
64
+ "default_observer",
65
+ "default_weight_observer",
66
+ "default_placeholder_observer",
67
+ "default_per_channel_weight_observer",
68
+ # FakeQuantize (for qat)
69
+ "default_fake_quant",
70
+ "default_weight_fake_quant",
71
+ "default_fixed_qparams_range_neg1to1_fake_quant",
72
+ "default_fixed_qparams_range_0to1_fake_quant",
73
+ "default_per_channel_weight_fake_quant",
74
+ "default_histogram_fake_quant",
75
+ # QConfig
76
+ "QConfig",
77
+ "default_qconfig",
78
+ "default_dynamic_qconfig",
79
+ "float16_dynamic_qconfig",
80
+ "float_qparams_weight_only_qconfig",
81
+ # QAT utilities
82
+ "default_qat_qconfig",
83
+ "prepare_qat",
84
+ "quantize_qat",
85
+ # module transformations
86
+ "fuse_modules",
87
+ ]
venv/lib/python3.10/site-packages/torch/quantization/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.85 kB). View file
 
venv/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite.cpython-310.pyc ADDED
Binary file (1.04 kB). View file
 
venv/lib/python3.10/site-packages/torch/quantization/__pycache__/_numeric_suite_fx.cpython-310.pyc ADDED
Binary file (1.01 kB). View file
 
venv/lib/python3.10/site-packages/torch/quantization/__pycache__/_quantized_conversions.cpython-310.pyc ADDED
Binary file (2.7 kB). View file
 
venv/lib/python3.10/site-packages/torch/quantization/__pycache__/fake_quantize.cpython-310.pyc ADDED
Binary file (1.3 kB). View file
 
venv/lib/python3.10/site-packages/torch/quantization/__pycache__/fuse_modules.cpython-310.pyc ADDED
Binary file (805 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/quantization/__pycache__/fuser_method_mappings.cpython-310.pyc ADDED
Binary file (723 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/quantization/__pycache__/observer.cpython-310.pyc ADDED
Binary file (1.38 kB). View file
 
venv/lib/python3.10/site-packages/torch/quantization/__pycache__/qconfig.cpython-310.pyc ADDED
Binary file (1.18 kB). View file
 
venv/lib/python3.10/site-packages/torch/quantization/__pycache__/quant_type.cpython-310.pyc ADDED
Binary file (597 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/quantization/__pycache__/quantization_mappings.cpython-310.pyc ADDED
Binary file (1.43 kB). View file
 
venv/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize.cpython-310.pyc ADDED
Binary file (1.07 kB). View file
 
venv/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize_fx.cpython-310.pyc ADDED
Binary file (993 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/quantization/__pycache__/quantize_jit.cpython-310.pyc ADDED
Binary file (966 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/quantization/__pycache__/stubs.cpython-310.pyc ADDED
Binary file (594 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/quantization/__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.08 kB). View file
 
venv/lib/python3.10/site-packages/torch/quantization/_numeric_suite.py ADDED
@@ -0,0 +1,28 @@
1
+ # flake8: noqa: F401
2
+ r"""
3
+ This file is in the process of migration to `torch/ao/quantization`, and
4
+ is kept here for compatibility while the migration process is ongoing.
5
+ If you are adding a new entry/functionality, please, add it to the
6
+ `torch/ao/ns/_numeric_suite.py`, while adding an import statement
7
+ here.
8
+ """
9
+
10
+ from torch.ao.ns._numeric_suite import (
11
+ _convert_tuple_to_list,
12
+ _dequantize_tensor_list,
13
+ _find_match,
14
+ _get_logger_dict_helper,
15
+ _is_identical_module_type,
16
+ compare_model_outputs,
17
+ compare_model_stub,
18
+ compare_weights,
19
+ get_logger_dict,
20
+ get_matching_activations,
21
+ Logger,
22
+ NON_LEAF_MODULE_TO_ADD_OBSERVER_ALLOW_LIST,
23
+ OutputLogger,
24
+ prepare_model_outputs,
25
+ prepare_model_with_stubs,
26
+ Shadow,
27
+ ShadowLogger,
28
+ )
venv/lib/python3.10/site-packages/torch/quantization/_numeric_suite_fx.py ADDED
@@ -0,0 +1,26 @@
1
+ # flake8: noqa: F401
2
+ r"""
3
+ This file is in the process of migration to `torch/ao/quantization`, and
4
+ is kept here for compatibility while the migration process is ongoing.
5
+ If you are adding a new entry/functionality, please, add it to the
6
+ `torch/ao/ns/_numeric_suite_fx.py`, while adding an import statement
7
+ here.
8
+ """
9
+
10
+ from torch.ao.ns._numeric_suite_fx import (
11
+ _add_loggers_impl,
12
+ _add_loggers_one_model,
13
+ _add_shadow_loggers_impl,
14
+ _extract_logger_info_one_model,
15
+ _extract_weights_impl,
16
+ _extract_weights_one_model,
17
+ add_loggers,
18
+ add_shadow_loggers,
19
+ extend_logger_results_with_comparison,
20
+ extract_logger_info,
21
+ extract_shadow_logger_info,
22
+ extract_weights,
23
+ NSTracer,
24
+ OutputLogger,
25
+ RNNReturnType,
26
+ )
venv/lib/python3.10/site-packages/torch/quantization/_quantized_conversions.py ADDED
@@ -0,0 +1,132 @@
1
+ import torch
2
+
3
+
4
+ # Pack pairs of int4 values into int8, in row major order; the first int4
5
+ # value goes into the lower order bits, and the second int4 value into the
6
+ # higher order bits of the resulting int8 value.
7
+ def pack_int4_to_int8(weight):
8
+ assert weight.dim() == 2
9
+ assert weight.shape[1] % 2 == 0
10
+ assert weight.dtype == torch.int8
11
+ return ((weight[:, 1::2] & 0xF) << 4) | (weight[:, 0::2] & 0xF)
12
+
13
+
14
+ # Unpack pairs of int4 values from int8 values, in row major order; the
15
+ # lower 4 bits go into the first int4 value, and the upper 4 bits go into
16
+ # the second int4 value.
17
+ def unpack_int8_to_int4(weight):
18
+ assert weight.dim() == 2
19
+ assert weight.dtype == torch.int8
20
+ return torch.stack((weight & 0xF, (weight >> 4) & 0xF), dim=2).view(
21
+ weight.shape[0], 2 * weight.shape[1]
22
+ )
23
+
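A worked round-trip example for the two helpers above: the int4 values 1 and 2 pack into the single int8 byte 0x21 (decimal 33), and unpack back in lower-nibble-first order:

import torch

w = torch.tensor([[1, 2]], dtype=torch.int8)
packed = pack_int4_to_int8(w)           # (2 << 4) | 1 == 0x21 == 33
assert packed.item() == 33
restored = unpack_int8_to_int4(packed)  # lower nibble first: [[1, 2]]
assert restored.tolist() == [[1, 2]]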
24
+
25
+ # Transpose the weight matrix, and then reorder its elements according
26
+ # to the underlying requirements of the CUTLASS library, so that it can be
27
+ # used for the CUTLASS-based mixed datatypes linear operation.
28
+ def quantized_weight_reorder_for_mixed_dtypes_linear_cutlass(
29
+ weight, dtypeq, transpose=False
30
+ ):
31
+ assert weight.dim() == 2
32
+ assert weight.dtype == torch.int8
33
+ assert dtypeq == torch.int8 or dtypeq == torch.quint4x2
34
+ assert weight.device.type == "cuda"
35
+
36
+ device = weight.device
37
+
38
+ # subbyte_transpose
39
+ if not transpose:
40
+ if dtypeq == torch.int8:
41
+ outp = weight.T
42
+ elif dtypeq == torch.quint4x2:
43
+ outp = pack_int4_to_int8(unpack_int8_to_int4(weight.view(torch.int8)).T)
44
+ else:
45
+ outp = weight
46
+
47
+ ncols, nrows = outp.shape # type: ignore[possibly-undefined]
48
+ assert nrows % (32 if dtypeq == torch.quint4x2 else 64) == 0
49
+ assert ncols % 64 == 0
50
+
51
+ # permute_B_rows_for_mixed_gemm
52
+ # (permute cols actually, as transpose is applied first here)
53
+ if dtypeq == torch.quint4x2:
54
+ cols_permuted = (
55
+ torch.tensor(
56
+ [0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15],
57
+ device=device,
58
+ )
59
+ + (torch.arange(0, nrows // 16, device=device).reshape(-1, 1) * 16).expand(
60
+ nrows // 16, 16
61
+ )
62
+ ).view(-1)
63
+ else:
64
+ cols_permuted = (
65
+ torch.tensor(
66
+ [0, 1, 4, 5, 8, 9, 12, 13, 2, 3, 6, 7, 10, 11, 14, 15],
67
+ device=device,
68
+ )
69
+ + (torch.arange(0, nrows // 16, device=device).reshape(-1, 1) * 16).expand(
70
+ nrows // 16, 16
71
+ )
72
+ ).view(-1)
73
+ outp = outp.index_copy(1, cols_permuted, outp)
74
+
75
+ # interleave_column_major_tensor
76
+ magic0 = 4 if dtypeq == torch.quint4x2 else 2
77
+ magic1 = 32 // magic0
78
+
79
+ tmp0 = (
80
+ (torch.arange(0, ncols // magic0, device=device) * (nrows // 4 * magic0))
81
+ .view(-1, 1)
82
+ .repeat(1, nrows // 4 * magic0)
83
+ .view(-1)
84
+ )
85
+ tmp1 = (
86
+ (torch.arange(0, nrows // 4 // magic1, device=device) * (magic0 * magic1))
87
+ .view(-1, 1)
88
+ .repeat(1, magic1)
89
+ .view(-1)
90
+ .repeat(ncols)
91
+ )
92
+ tmp2 = (
93
+ (torch.arange(0, magic0, device=device) * magic1)
94
+ .view(-1, 1)
95
+ .repeat(1, nrows // 4)
96
+ .view(-1)
97
+ .repeat(ncols // magic0)
98
+ )
99
+ tmp3 = torch.arange(0, magic1, device=device).repeat(nrows // 4 * ncols // magic1)
100
+
101
+ outp_offsets = tmp0 + tmp1 + tmp2 + tmp3
102
+
103
+ tmp = outp.view(-1).view(torch.int32)
104
+ outp = torch.zeros_like(tmp)
105
+ outp.scatter_(0, outp_offsets, tmp)
106
+ outp = outp.view(weight.dtype)
107
+
108
+ # add_bias_and_interleave_quantized_tensor_inplace
109
+ tmp = outp.view(-1)
110
+
111
+ outp = torch.empty_like(tmp)
112
+ if dtypeq == torch.int8:
113
+ tmp = (tmp.to(torch.int) + 128).to(tmp.dtype)
114
+ outp[0::4] = tmp[0::4]
115
+ outp[1::4] = tmp[2::4]
116
+ outp[2::4] = tmp[1::4]
117
+ outp[3::4] = tmp[3::4]
118
+ elif dtypeq == torch.quint4x2:
119
+ tmp0 = ((tmp & 0xF) + 8) & 0xF
120
+ tmp0 = (tmp0[1::2] << 4) | tmp0[0::2]
121
+ tmp1 = (((tmp >> 4) & 0xF) + 8) & 0xF
122
+ tmp1 = (tmp1[1::2] << 4) | tmp1[0::2]
123
+ outp[0::4] = tmp0[0::2]
124
+ outp[1::4] = tmp0[1::2]
125
+ outp[2::4] = tmp1[0::2]
126
+ outp[3::4] = tmp1[1::2]
127
+
128
+ if dtypeq == torch.quint4x2:
129
+ nrows *= 2
130
+ ncols //= 2
131
+
132
+ return outp.view(nrows, ncols).view(torch.uint8)
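A hedged usage sketch, guarded by CUDA availability; per the asserts above, the weight must be a 2D int8 CUDA tensor whose dimensions (after the implicit transpose) are multiples of 64, and the shape here is a placeholder:

import torch

if torch.cuda.is_available():
    w = torch.randint(-8, 8, (64, 64), dtype=torch.int8, device="cuda")  # placeholder shape
    w_reordered = quantized_weight_reorder_for_mixed_dtypes_linear_cutlass(
        w, dtypeq=torch.int8, transpose=False
    )
    # w_reordered is a uint8 view laid out for the CUTLASS mixed-dtypes GEMM.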
venv/lib/python3.10/site-packages/torch/quantization/fake_quantize.py ADDED
@@ -0,0 +1,32 @@
1
+ # flake8: noqa: F401
2
+ r"""
3
+ This file is in the process of migration to `torch/ao/quantization`, and
4
+ is kept here for compatibility while the migration process is ongoing.
5
+ If you are adding a new entry/functionality, please, add it to the
6
+ `torch/ao/quantization/fake_quantize.py`, while adding an import statement
7
+ here.
8
+ """
9
+
10
+ from torch.ao.quantization.fake_quantize import (
11
+ _is_fake_quant_script_module,
12
+ _is_per_channel,
13
+ _is_per_tensor,
14
+ _is_symmetric_quant,
15
+ default_fake_quant,
16
+ default_fixed_qparams_range_0to1_fake_quant,
17
+ default_fixed_qparams_range_neg1to1_fake_quant,
18
+ default_fused_act_fake_quant,
19
+ default_fused_per_channel_wt_fake_quant,
20
+ default_fused_wt_fake_quant,
21
+ default_histogram_fake_quant,
22
+ default_per_channel_weight_fake_quant,
23
+ default_weight_fake_quant,
24
+ disable_fake_quant,
25
+ disable_observer,
26
+ enable_fake_quant,
27
+ enable_observer,
28
+ FakeQuantize,
29
+ FakeQuantizeBase,
30
+ FixedQParamsFakeQuantize,
31
+ FusedMovingAvgObsFakeQuantize,
32
+ )
venv/lib/python3.10/site-packages/torch/quantization/fuse_modules.py ADDED
@@ -0,0 +1,22 @@
1
+ # flake8: noqa: F401
2
+ r"""
3
+ This file is in the process of migration to `torch/ao/quantization`, and
4
+ is kept here for compatibility while the migration process is ongoing.
5
+ If you are adding a new entry/functionality, please, add it to the
6
+ `torch/ao/quantization/fuse_modules.py`, while adding an import statement
7
+ here.
8
+ """
9
+
10
+ # TODO: These functions are not used outside of `fuse_modules.py`.
11
+ # Keeping them here for now; they need to be removed later.
12
+ from torch.ao.quantization.fuse_modules import (
13
+ _fuse_modules,
14
+ _get_module,
15
+ _set_module,
16
+ fuse_known_modules,
17
+ fuse_modules,
18
+ get_fuser_method,
19
+ )
20
+
21
+ # for backward compatibility
22
+ from torch.ao.quantization.fuser_method_mappings import fuse_conv_bn, fuse_conv_bn_relu
venv/lib/python3.10/site-packages/torch/quantization/fuser_method_mappings.py ADDED
@@ -0,0 +1,15 @@
1
+ # flake8: noqa: F401
2
+ r"""
3
+ This file is in the process of migration to `torch/ao/quantization`, and
4
+ is kept here for compatibility while the migration process is ongoing.
5
+ If you are adding a new entry/functionality, please, add it to the
6
+ `torch/ao/quantization/fuser_method_mappings.py`, while adding an import statement
7
+ here.
8
+ """
9
+ from torch.ao.quantization.fuser_method_mappings import (
10
+ _DEFAULT_OP_LIST_TO_FUSER_METHOD,
11
+ fuse_conv_bn,
12
+ fuse_conv_bn_relu,
13
+ fuse_linear_bn,
14
+ get_fuser_method,
15
+ )
venv/lib/python3.10/site-packages/torch/quantization/fx/__init__.py ADDED
@@ -0,0 +1,15 @@
1
+ # flake8: noqa: F401
2
+ r"""
3
+ This file is in the process of migration to `torch/ao/quantization`, and
4
+ is kept here for compatibility while the migration process is ongoing.
5
+ If you are adding a new entry/functionality, please, add it to the
6
+ appropriate files under `torch/ao/quantization/fx/`, while adding an import statement
7
+ here.
8
+ """
9
+
10
+ from torch.ao.quantization.fx.convert import convert
11
+ from torch.ao.quantization.fx.fuse import fuse
12
+
13
+ # omitting files that are unlikely to be used right now, for example
14
+ # the newly added lower_to_fbgemm etc.
15
+ from torch.ao.quantization.fx.prepare import prepare
venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (699 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/_equalize.cpython-310.pyc ADDED
Binary file (1.57 kB). View file
 
venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/convert.cpython-310.pyc ADDED
Binary file (576 Bytes). View file
 
venv/lib/python3.10/site-packages/torch/quantization/fx/__pycache__/fuse.cpython-310.pyc ADDED
Binary file (567 Bytes). View file