applied-ai-018 commited on
Commit
5cc41a3
·
verified ·
1 Parent(s): 4d8e20b

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_content_store.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_cpp_extension_versioner.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_cuda_trace.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_device.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_foreach_utils.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_mode_utils.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_python_dispatch.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_pytree.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_zip.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/backend_registration.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/checkpoint.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/deterministic.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/file_baton.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/flop_counter.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/hooks.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/mkldnn.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/mobile_optimizer.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/model_zoo.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/show_pickle.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/throughput_benchmark.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/__init__.py +6 -0
  22. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/__pycache__/__init__.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/__init__.py +0 -0
  24. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/__init__.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/blas_compare_setup.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/compare.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/fuzzer.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/op_benchmark.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/simple_timeit.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/spectral_ops_fuzz_test.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/blas_compare_setup.py +221 -0
  32. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/compare.py +98 -0
  33. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/fuzzer.py +85 -0
  34. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/op_benchmark.py +103 -0
  35. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/simple_timeit.py +25 -0
  36. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/spectral_ops_fuzz_test.py +113 -0
  37. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__init__.py +0 -0
  38. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/__init__.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/binary.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/sparse_binary.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/sparse_unary.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/spectral.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/unary.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/binary.py +106 -0
  45. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/sparse_binary.py +106 -0
  46. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/sparse_unary.py +82 -0
  47. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/spectral.py +93 -0
  48. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/unary.py +81 -0
  49. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/utils/__init__.py +0 -0
  50. llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/__init__.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_content_store.cpython-310.pyc ADDED
Binary file (6.13 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_cpp_extension_versioner.cpython-310.pyc ADDED
Binary file (1.85 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_cuda_trace.cpython-310.pyc ADDED
Binary file (3.89 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_device.cpython-310.pyc ADDED
Binary file (3.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_foreach_utils.cpython-310.pyc ADDED
Binary file (2.11 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_mode_utils.cpython-310.pyc ADDED
Binary file (609 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_python_dispatch.cpython-310.pyc ADDED
Binary file (18 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_pytree.cpython-310.pyc ADDED
Binary file (44.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/_zip.cpython-310.pyc ADDED
Binary file (1.97 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/backend_registration.cpython-310.pyc ADDED
Binary file (14.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/checkpoint.cpython-310.pyc ADDED
Binary file (43.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/deterministic.cpython-310.pyc ADDED
Binary file (1.01 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/file_baton.cpython-310.pyc ADDED
Binary file (1.89 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/flop_counter.cpython-310.pyc ADDED
Binary file (15.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/hooks.cpython-310.pyc ADDED
Binary file (8.74 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/mkldnn.cpython-310.pyc ADDED
Binary file (7.04 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/mobile_optimizer.cpython-310.pyc ADDED
Binary file (4.99 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/model_zoo.cpython-310.pyc ADDED
Binary file (268 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/show_pickle.cpython-310.pyc ADDED
Binary file (5.08 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/__pycache__/throughput_benchmark.cpython-310.pyc ADDED
Binary file (6.94 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ from torch.utils.benchmark.utils.common import * # noqa: F403
2
+ from torch.utils.benchmark.utils.timer import * # noqa: F403
3
+ from torch.utils.benchmark.utils.compare import * # noqa: F403
4
+ from torch.utils.benchmark.utils.fuzzer import * # noqa: F403
5
+ from torch.utils.benchmark.utils.valgrind_wrapper.timer_interface import * # noqa: F403
6
+ from torch.utils.benchmark.utils.sparse_fuzzer import * # noqa: F403
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (508 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (203 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/blas_compare_setup.cpython-310.pyc ADDED
Binary file (4.06 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/compare.cpython-310.pyc ADDED
Binary file (3.54 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/fuzzer.cpython-310.pyc ADDED
Binary file (2.79 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/op_benchmark.cpython-310.pyc ADDED
Binary file (4.08 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/simple_timeit.cpython-310.pyc ADDED
Binary file (897 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/__pycache__/spectral_ops_fuzz_test.cpython-310.pyc ADDED
Binary file (4.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/blas_compare_setup.py ADDED
@@ -0,0 +1,221 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import collections
2
+ import os
3
+ import shutil
4
+ import subprocess
5
+
6
+ try:
7
+ # no type stub for conda command line interface
8
+ import conda.cli.python_api # type: ignore[import]
9
+ from conda.cli.python_api import Commands as conda_commands
10
+ except ImportError:
11
+ # blas_compare.py will fail to import these when it's inside a conda env,
12
+ # but that's fine as it only wants the constants.
13
+ pass
14
+
15
+
16
+ WORKING_ROOT = "/tmp/pytorch_blas_compare_environments"
17
+ MKL_2020_3 = "mkl_2020_3"
18
+ MKL_2020_0 = "mkl_2020_0"
19
+ OPEN_BLAS = "open_blas"
20
+ EIGEN = "eigen"
21
+
22
+
23
+ GENERIC_ENV_VARS = ("USE_CUDA=0", "USE_ROCM=0")
24
+ BASE_PKG_DEPS = (
25
+ "cmake",
26
+ "hypothesis",
27
+ "ninja",
28
+ "numpy",
29
+ "pyyaml",
30
+ "setuptools",
31
+ "typing_extensions",
32
+ )
33
+
34
+
35
+ SubEnvSpec = collections.namedtuple(
36
+ "SubEnvSpec", (
37
+ "generic_installs",
38
+ "special_installs",
39
+ "environment_variables",
40
+
41
+ # Validate install.
42
+ "expected_blas_symbols",
43
+ "expected_mkl_version",
44
+ ))
45
+
46
+
47
+ SUB_ENVS = {
48
+ MKL_2020_3: SubEnvSpec(
49
+ generic_installs=(),
50
+ special_installs=("intel", ("mkl=2020.3", "mkl-include=2020.3")),
51
+ environment_variables=("BLAS=MKL",) + GENERIC_ENV_VARS,
52
+ expected_blas_symbols=("mkl_blas_sgemm",),
53
+ expected_mkl_version="2020.0.3",
54
+ ),
55
+
56
+ MKL_2020_0: SubEnvSpec(
57
+ generic_installs=(),
58
+ special_installs=("intel", ("mkl=2020.0", "mkl-include=2020.0")),
59
+ environment_variables=("BLAS=MKL",) + GENERIC_ENV_VARS,
60
+ expected_blas_symbols=("mkl_blas_sgemm",),
61
+ expected_mkl_version="2020.0.0",
62
+ ),
63
+
64
+ OPEN_BLAS: SubEnvSpec(
65
+ generic_installs=("openblas",),
66
+ special_installs=(),
67
+ environment_variables=("BLAS=OpenBLAS",) + GENERIC_ENV_VARS,
68
+ expected_blas_symbols=("exec_blas",),
69
+ expected_mkl_version=None,
70
+ ),
71
+
72
+ # EIGEN: SubEnvSpec(
73
+ # generic_installs=(),
74
+ # special_installs=(),
75
+ # environment_variables=("BLAS=Eigen",) + GENERIC_ENV_VARS,
76
+ # expected_blas_symbols=(),
77
+ # ),
78
+ }
79
+
80
+
81
+ def conda_run(*args):
82
+ """Convenience method."""
83
+ stdout, stderr, retcode = conda.cli.python_api.run_command(*args)
84
+ if retcode:
85
+ raise OSError(f"conda error: {str(args)} retcode: {retcode}\n{stderr}")
86
+
87
+ return stdout
88
+
89
+
90
+ def main():
91
+ if os.path.exists(WORKING_ROOT):
92
+ print("Cleaning: removing old working root.")
93
+ shutil.rmtree(WORKING_ROOT)
94
+ os.makedirs(WORKING_ROOT)
95
+
96
+ git_root = subprocess.check_output(
97
+ "git rev-parse --show-toplevel",
98
+ shell=True,
99
+ cwd=os.path.dirname(os.path.realpath(__file__))
100
+ ).decode("utf-8").strip()
101
+
102
+ for env_name, env_spec in SUB_ENVS.items():
103
+ env_path = os.path.join(WORKING_ROOT, env_name)
104
+ print(f"Creating env: {env_name}: ({env_path})")
105
+ conda_run(
106
+ conda_commands.CREATE,
107
+ "--no-default-packages",
108
+ "--prefix", env_path,
109
+ "python=3",
110
+ )
111
+
112
+ print("Testing that env can be activated:")
113
+ base_source = subprocess.run(
114
+ f"source activate {env_path}",
115
+ shell=True,
116
+ capture_output=True,
117
+ check=False,
118
+ )
119
+ if base_source.returncode:
120
+ raise OSError(
121
+ "Failed to source base environment:\n"
122
+ f" stdout: {base_source.stdout.decode('utf-8')}\n"
123
+ f" stderr: {base_source.stderr.decode('utf-8')}"
124
+ )
125
+
126
+ print("Installing packages:")
127
+ conda_run(
128
+ conda_commands.INSTALL,
129
+ "--prefix", env_path,
130
+ *(BASE_PKG_DEPS + env_spec.generic_installs)
131
+ )
132
+
133
+ if env_spec.special_installs:
134
+ channel, channel_deps = env_spec.special_installs
135
+ print(f"Installing packages from channel: {channel}")
136
+ conda_run(
137
+ conda_commands.INSTALL,
138
+ "--prefix", env_path,
139
+ "-c", channel, *channel_deps
140
+ )
141
+
142
+ if env_spec.environment_variables:
143
+ print("Setting environment variables.")
144
+
145
+ # This does not appear to be possible using the python API.
146
+ env_set = subprocess.run(
147
+ f"source activate {env_path} && "
148
+ f"conda env config vars set {' '.join(env_spec.environment_variables)}",
149
+ shell=True,
150
+ capture_output=True,
151
+ check=False,
152
+ )
153
+ if env_set.returncode:
154
+ raise OSError(
155
+ "Failed to set environment variables:\n"
156
+ f" stdout: {env_set.stdout.decode('utf-8')}\n"
157
+ f" stderr: {env_set.stderr.decode('utf-8')}"
158
+ )
159
+
160
+ # Check that they were actually set correctly.
161
+ actual_env_vars = subprocess.run(
162
+ f"source activate {env_path} && env",
163
+ shell=True,
164
+ capture_output=True,
165
+ check=True,
166
+ ).stdout.decode("utf-8").strip().splitlines()
167
+ for e in env_spec.environment_variables:
168
+ assert e in actual_env_vars, f"{e} not in envs"
169
+
170
+ print(f"Building PyTorch for env: `{env_name}`")
171
+ # We have to re-run during each build to pick up the new
172
+ # build config settings.
173
+ build_run = subprocess.run(
174
+ f"source activate {env_path} && "
175
+ f"cd {git_root} && "
176
+ "python setup.py install --cmake",
177
+ shell=True,
178
+ capture_output=True,
179
+ check=True,
180
+ )
181
+
182
+ print("Checking configuration:")
183
+ check_run = subprocess.run(
184
+ # Shameless abuse of `python -c ...`
185
+ f"source activate {env_path} && "
186
+ "python -c \""
187
+ "import torch;"
188
+ "from torch.utils.benchmark import Timer;"
189
+ "print(torch.__config__.show());"
190
+ "setup = 'x=torch.ones((128, 128));y=torch.ones((128, 128))';"
191
+ "counts = Timer('torch.mm(x, y)', setup).collect_callgrind(collect_baseline=False);"
192
+ "stats = counts.as_standardized().stats(inclusive=True);"
193
+ "print(stats.filter(lambda l: 'blas' in l.lower()))\"",
194
+ shell=True,
195
+ capture_output=True,
196
+ check=False,
197
+ )
198
+ if check_run.returncode:
199
+ raise OSError(
200
+ "Failed to set environment variables:\n"
201
+ f" stdout: {check_run.stdout.decode('utf-8')}\n"
202
+ f" stderr: {check_run.stderr.decode('utf-8')}"
203
+ )
204
+ check_run_stdout = check_run.stdout.decode('utf-8')
205
+ print(check_run_stdout)
206
+
207
+ for e in env_spec.environment_variables:
208
+ if "BLAS" in e:
209
+ assert e in check_run_stdout, f"PyTorch build did not respect `BLAS=...`: {e}"
210
+
211
+ for s in env_spec.expected_blas_symbols:
212
+ assert s in check_run_stdout
213
+
214
+ if env_spec.expected_mkl_version is not None:
215
+ assert f"- Intel(R) Math Kernel Library Version {env_spec.expected_mkl_version}" in check_run_stdout
216
+
217
+ print(f"Build complete: {env_name}")
218
+
219
+
220
+ if __name__ == "__main__":
221
+ main()
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/compare.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Example of Timer and Compare APIs:
2
+
3
+ $ python -m examples.compare
4
+ """
5
+
6
+ import pickle
7
+ import sys
8
+ import time
9
+
10
+ import torch
11
+
12
+ import torch.utils.benchmark as benchmark_utils
13
+
14
+
15
+ class FauxTorch:
16
+ """Emulate different versions of pytorch.
17
+
18
+ In normal circumstances this would be done with multiple processes
19
+ writing serialized measurements, but this simplifies that model to
20
+ make the example clearer.
21
+ """
22
+ def __init__(self, real_torch, extra_ns_per_element):
23
+ self._real_torch = real_torch
24
+ self._extra_ns_per_element = extra_ns_per_element
25
+
26
+ def extra_overhead(self, result):
27
+ # time.sleep has a ~65 us overhead, so only fake a
28
+ # per-element overhead if numel is large enough.
29
+ numel = int(result.numel())
30
+ if numel > 5000:
31
+ time.sleep(numel * self._extra_ns_per_element * 1e-9)
32
+ return result
33
+
34
+ def add(self, *args, **kwargs):
35
+ return self.extra_overhead(self._real_torch.add(*args, **kwargs))
36
+
37
+ def mul(self, *args, **kwargs):
38
+ return self.extra_overhead(self._real_torch.mul(*args, **kwargs))
39
+
40
+ def cat(self, *args, **kwargs):
41
+ return self.extra_overhead(self._real_torch.cat(*args, **kwargs))
42
+
43
+ def matmul(self, *args, **kwargs):
44
+ return self.extra_overhead(self._real_torch.matmul(*args, **kwargs))
45
+
46
+
47
+ def main():
48
+ tasks = [
49
+ ("add", "add", "torch.add(x, y)"),
50
+ ("add", "add (extra +0)", "torch.add(x, y + zero)"),
51
+ ]
52
+
53
+ serialized_results = []
54
+ repeats = 2
55
+ timers = [
56
+ benchmark_utils.Timer(
57
+ stmt=stmt,
58
+ globals={
59
+ "torch": torch if branch == "master" else FauxTorch(torch, overhead_ns),
60
+ "x": torch.ones((size, 4)),
61
+ "y": torch.ones((1, 4)),
62
+ "zero": torch.zeros(()),
63
+ },
64
+ label=label,
65
+ sub_label=sub_label,
66
+ description=f"size: {size}",
67
+ env=branch,
68
+ num_threads=num_threads,
69
+ )
70
+ for branch, overhead_ns in [("master", None), ("my_branch", 1), ("severe_regression", 5)]
71
+ for label, sub_label, stmt in tasks
72
+ for size in [1, 10, 100, 1000, 10000, 50000]
73
+ for num_threads in [1, 4]
74
+ ]
75
+
76
+ for i, timer in enumerate(timers * repeats):
77
+ serialized_results.append(pickle.dumps(
78
+ timer.blocked_autorange(min_run_time=0.05)
79
+ ))
80
+ print(f"\r{i + 1} / {len(timers) * repeats}", end="")
81
+ sys.stdout.flush()
82
+ print()
83
+
84
+ comparison = benchmark_utils.Compare([
85
+ pickle.loads(i) for i in serialized_results
86
+ ])
87
+
88
+ print("== Unformatted " + "=" * 80 + "\n" + "/" * 95 + "\n")
89
+ comparison.print()
90
+
91
+ print("== Formatted " + "=" * 80 + "\n" + "/" * 93 + "\n")
92
+ comparison.trim_significant_figures()
93
+ comparison.colorize()
94
+ comparison.print()
95
+
96
+
97
+ if __name__ == "__main__":
98
+ main()
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/fuzzer.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Example of the Timer and Fuzzer APIs:
2
+
3
+ $ python -m examples.fuzzer
4
+ """
5
+
6
+ import sys
7
+
8
+ import torch.utils.benchmark as benchmark_utils
9
+
10
+
11
+ def main():
12
+ add_fuzzer = benchmark_utils.Fuzzer(
13
+ parameters=[
14
+ [
15
+ benchmark_utils.FuzzedParameter(
16
+ name=f"k{i}",
17
+ minval=16,
18
+ maxval=16 * 1024,
19
+ distribution="loguniform",
20
+ ) for i in range(3)
21
+ ],
22
+ benchmark_utils.FuzzedParameter(
23
+ name="d",
24
+ distribution={2: 0.6, 3: 0.4},
25
+ ),
26
+ ],
27
+ tensors=[
28
+ [
29
+ benchmark_utils.FuzzedTensor(
30
+ name=name,
31
+ size=("k0", "k1", "k2"),
32
+ dim_parameter="d",
33
+ probability_contiguous=0.75,
34
+ min_elements=64 * 1024,
35
+ max_elements=128 * 1024,
36
+ ) for name in ("x", "y")
37
+ ],
38
+ ],
39
+ seed=0,
40
+ )
41
+
42
+ n = 250
43
+ measurements = []
44
+ for i, (tensors, tensor_properties, _) in enumerate(add_fuzzer.take(n=n)):
45
+ x, x_order = tensors["x"], str(tensor_properties["x"]["order"])
46
+ y, y_order = tensors["y"], str(tensor_properties["y"]["order"])
47
+ shape = ", ".join(tuple(f'{i:>4}' for i in x.shape))
48
+
49
+ description = "".join([
50
+ f"{x.numel():>7} | {shape:<16} | ",
51
+ f"{'contiguous' if x.is_contiguous() else x_order:<12} | ",
52
+ f"{'contiguous' if y.is_contiguous() else y_order:<12} | ",
53
+ ])
54
+
55
+ timer = benchmark_utils.Timer(
56
+ stmt="x + y",
57
+ globals=tensors,
58
+ description=description,
59
+ )
60
+
61
+ measurements.append(timer.blocked_autorange(min_run_time=0.1))
62
+ measurements[-1].metadata = {"numel": x.numel()}
63
+ print(f"\r{i + 1} / {n}", end="")
64
+ sys.stdout.flush()
65
+ print()
66
+
67
+ # More string munging to make pretty output.
68
+ print(f"Average attempts per valid config: {1. / (1. - add_fuzzer.rejection_rate):.1f}")
69
+
70
+ def time_fn(m):
71
+ return m.median / m.metadata["numel"]
72
+ measurements.sort(key=time_fn)
73
+
74
+ template = f"{{:>6}}{' ' * 19}Size Shape{' ' * 13}X order Y order\n{'-' * 80}"
75
+ print(template.format("Best:"))
76
+ for m in measurements[:15]:
77
+ print(f"{time_fn(m) * 1e9:>4.1f} ns / element {m.description}")
78
+
79
+ print("\n" + template.format("Worst:"))
80
+ for m in measurements[-15:]:
81
+ print(f"{time_fn(m) * 1e9:>4.1f} ns / element {m.description}")
82
+
83
+
84
+ if __name__ == "__main__":
85
+ main()
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/op_benchmark.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Example use of Timer and op fuzzers to measure kernel performance.
2
+
3
+ $ python -m examples.op_benchmark
4
+ """
5
+
6
+ import numpy as np
7
+ import torch
8
+
9
+ from torch.utils.benchmark import Timer
10
+ from torch.utils.benchmark.op_fuzzers.binary import BinaryOpFuzzer
11
+ from torch.utils.benchmark.op_fuzzers.unary import UnaryOpFuzzer
12
+
13
+
14
+ _MEASURE_TIME = 1.0
15
+
16
+
17
+ def assert_dicts_equal(dict_0, dict_1):
18
+ """Builtin dict comparison will not compare numpy arrays.
19
+ e.g.
20
+ x = {"a": np.ones((2, 1))}
21
+ x == x # Raises ValueError
22
+ """
23
+ assert set(dict_0.keys()) == set(dict_0.keys())
24
+ assert all(np.all(v == dict_1[k]) for k, v in dict_0.items() if k != "dtype")
25
+
26
+
27
+ def run(n, stmt, fuzzer_cls):
28
+ float_iter = fuzzer_cls(seed=0, dtype=torch.float32).take(n)
29
+ int_iter = fuzzer_cls(seed=0, dtype=torch.int32).take(n)
30
+ raw_results = []
31
+ for i, (float_values, int_values) in enumerate(zip(float_iter, int_iter)):
32
+ float_tensors, float_tensor_params, float_params = float_values
33
+ int_tensors, int_tensor_params, int_params = int_values
34
+
35
+ # This benchmark assumes that the two fuzzers generate identically
36
+ # sized and strided Tensors, since the same seed is used.
37
+ assert_dicts_equal(float_params, int_params)
38
+ assert_dicts_equal(float_tensor_params["x"], int_tensor_params["x"])
39
+
40
+ float_measurement, int_measurement = (
41
+ Timer(
42
+ stmt,
43
+ globals=tensors,
44
+ ).blocked_autorange(min_run_time=_MEASURE_TIME)
45
+ for tensors in (float_tensors, int_tensors)
46
+ )
47
+
48
+ descriptions = []
49
+ for name in float_tensors:
50
+ shape_str = "(" + ", ".join([
51
+ f"2 ** {int(np.log2(i))}"
52
+ if 2 ** int(np.log2(i)) == i and i > 1
53
+ else str(i)
54
+ for i in float_tensors[name].shape
55
+ ]) + ")"
56
+ order = float_tensor_params[name]["order"]
57
+ order_str = ("" if all(order == np.arange(len(order))) else str(tuple(order)))
58
+ steps = float_tensor_params[name]["steps"]
59
+ steps_str = str(steps) if sum(steps) > len(steps) else ""
60
+ descriptions.append((name, shape_str, order_str, steps_str))
61
+ raw_results.append((float_measurement, int_measurement, descriptions))
62
+
63
+ print(f"\r{i + 1} / {n}", end="")
64
+ print()
65
+
66
+ parsed_results, name_len, shape_len, order_len, steps_len = [], 0, 0, 0, 0
67
+ for float_measurement, int_measurement, descriptions in raw_results:
68
+ t_float = float_measurement.median * 1e6
69
+ t_int = int_measurement.median * 1e6
70
+ rel_diff = abs(t_float - t_int) / (t_float + t_int) * 2
71
+ parsed_results.append((t_float, t_int, rel_diff, descriptions))
72
+ for name, shape, order, steps in descriptions:
73
+ name_len = max(name_len, len(name))
74
+ shape_len = max(shape_len, len(shape))
75
+ order_len = max(order_len, len(order))
76
+ steps_len = max(steps_len, len(steps))
77
+
78
+ parsed_results.sort(key=lambda x: x[2])
79
+
80
+ print(f"stmt: {stmt}")
81
+ print(f" diff faster{'':>17}{' ' * name_len} ", end="")
82
+ print(f"{'shape'.ljust(shape_len)}{'':>16}{'order'.ljust(order_len)}", end="")
83
+ print(f" steps\n{'-' * 100}")
84
+ for results, spacer in [(parsed_results[:10], "..."), (parsed_results[-10:], "")]:
85
+ for t_float, t_int, rel_diff, descriptions in results:
86
+ time_str = [f"{rel_diff * 100:>4.1f}% {'int' if t_int < t_float else 'float':<20}"]
87
+ time_str.extend(["".ljust(len(time_str[0])) for _ in descriptions[:-1]])
88
+ for t_str, (name, shape, order, steps) in zip(time_str, descriptions):
89
+ name = f"{name}:".ljust(name_len + 1)
90
+ shape = shape.ljust(shape_len + 10)
91
+ order = order.ljust(order_len)
92
+ print(f"{t_str} {name} {shape}| {order} | {steps}")
93
+ print(spacer)
94
+
95
+
96
+ def main():
97
+ run(n=100, stmt="torch.median(x, dim=0)", fuzzer_cls=UnaryOpFuzzer)
98
+ run(n=100, stmt="torch.square(x)", fuzzer_cls=UnaryOpFuzzer)
99
+ run(n=100, stmt="x + y", fuzzer_cls=BinaryOpFuzzer)
100
+
101
+
102
+ if __name__ == "__main__":
103
+ main()
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/simple_timeit.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Trivial use of Timer API:
2
+
3
+ $ python -m examples.simple_timeit
4
+ """
5
+
6
+ import torch
7
+
8
+ import torch.utils.benchmark as benchmark_utils
9
+
10
+
11
+ def main():
12
+ timer = benchmark_utils.Timer(
13
+ stmt="x + y",
14
+ globals={"x": torch.ones((4, 8)), "y": torch.ones((1, 8))},
15
+ label="Broadcasting add (4x8)",
16
+ )
17
+
18
+ for i in range(3):
19
+ print(f"Run: {i}\n{'-' * 40}")
20
+ print(f"timeit:\n{timer.timeit(10000)}\n")
21
+ print(f"autorange:\n{timer.blocked_autorange()}\n\n")
22
+
23
+
24
+ if __name__ == "__main__":
25
+ main()
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/examples/spectral_ops_fuzz_test.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Microbenchmarks for the torch.fft module"""
2
+ from argparse import ArgumentParser
3
+ from collections import namedtuple
4
+ from collections.abc import Iterable
5
+
6
+ import torch
7
+ import torch.fft
8
+ from torch.utils import benchmark
9
+ from torch.utils.benchmark.op_fuzzers.spectral import SpectralOpFuzzer
10
+
11
+
12
+ def _dim_options(ndim):
13
+ if ndim == 1:
14
+ return [None]
15
+ elif ndim == 2:
16
+ return [0, 1, None]
17
+ elif ndim == 3:
18
+ return [0, 1, 2, (0, 1), (0, 2), None]
19
+ raise ValueError(f"Expected ndim in range 1-3, got {ndim}")
20
+
21
+
22
+ def run_benchmark(name: str, function: object, dtype: torch.dtype, seed: int, device: str, samples: int,
23
+ probability_regular: float):
24
+ cuda = device == 'cuda'
25
+ spectral_fuzzer = SpectralOpFuzzer(seed=seed, dtype=dtype, cuda=cuda,
26
+ probability_regular=probability_regular)
27
+ results = []
28
+ for tensors, tensor_params, params in spectral_fuzzer.take(samples):
29
+ shape = [params['k0'], params['k1'], params['k2']][:params['ndim']]
30
+ str_shape = ' x '.join([f"{s:<4}" for s in shape])
31
+ sub_label = f"{str_shape} {'' if tensor_params['x']['is_contiguous'] else '(discontiguous)'}"
32
+ for dim in _dim_options(params['ndim']):
33
+ for nthreads in (1, 4, 16) if not cuda else (1,):
34
+ measurement = benchmark.Timer(
35
+ stmt='func(x, dim=dim)',
36
+ globals={'func': function, 'x': tensors['x'], 'dim': dim},
37
+ label=f"{name}_{device}",
38
+ sub_label=sub_label,
39
+ description=f"dim={dim}",
40
+ num_threads=nthreads,
41
+ ).blocked_autorange(min_run_time=1)
42
+ measurement.metadata = {
43
+ 'name': name,
44
+ 'device': device,
45
+ 'dim': dim,
46
+ 'shape': shape,
47
+ }
48
+ measurement.metadata.update(tensor_params['x'])
49
+ results.append(measurement)
50
+ return results
51
+
52
+
53
+ Benchmark = namedtuple('Benchmark', ['name', 'function', 'dtype'])
54
+ BENCHMARKS = [
55
+ Benchmark('fft_real', torch.fft.fftn, torch.float32),
56
+ Benchmark('fft_complex', torch.fft.fftn, torch.complex64),
57
+ Benchmark('ifft', torch.fft.ifftn, torch.complex64),
58
+ Benchmark('rfft', torch.fft.rfftn, torch.float32),
59
+ Benchmark('irfft', torch.fft.irfftn, torch.complex64),
60
+ ]
61
+ BENCHMARK_MAP = {b.name: b for b in BENCHMARKS}
62
+ BENCHMARK_NAMES = [b.name for b in BENCHMARKS]
63
+ DEVICE_NAMES = ['cpu', 'cuda']
64
+
65
+ def _output_csv(file, results):
66
+ file.write('benchmark,device,num_threads,numel,shape,contiguous,dim,mean (us),median (us),iqr (us)\n')
67
+ for measurement in results:
68
+ metadata = measurement.metadata
69
+ device, dim, shape, name, numel, contiguous = (
70
+ metadata['device'], metadata['dim'], metadata['shape'],
71
+ metadata['name'], metadata['numel'], metadata['is_contiguous'])
72
+
73
+ if isinstance(dim, Iterable):
74
+ dim_str = '-'.join(str(d) for d in dim)
75
+ else:
76
+ dim_str = str(dim)
77
+ shape_str = 'x'.join(str(s) for s in shape)
78
+
79
+ print(name, device, measurement.task_spec.num_threads, numel, shape_str, contiguous, dim_str, # type: ignore[possibly-undefined]
80
+ measurement.mean * 1e6, measurement.median * 1e6, measurement.iqr * 1e6,
81
+ sep=',', file=file)
82
+
83
+
84
+ if __name__ == '__main__':
85
+ parser = ArgumentParser(description=__doc__)
86
+ parser.add_argument('--device', type=str, choices=DEVICE_NAMES, nargs='+', default=DEVICE_NAMES)
87
+ parser.add_argument('--bench', type=str, choices=BENCHMARK_NAMES, nargs='+', default=BENCHMARK_NAMES)
88
+ parser.add_argument('--seed', type=int, default=0)
89
+ parser.add_argument('--samples', type=int, default=10)
90
+ parser.add_argument('--probability-regular', '--probability_regular', type=float, default=1.0)
91
+ parser.add_argument('-o', '--output', type=str)
92
+ args = parser.parse_args()
93
+
94
+ num_benchmarks = len(args.device) * len(args.bench)
95
+ i = 0
96
+ results = []
97
+ for device in args.device:
98
+ for bench in (BENCHMARK_MAP[b] for b in args.bench):
99
+ results += run_benchmark(
100
+ name=bench.name, function=bench.function, dtype=bench.dtype,
101
+ seed=args.seed, device=device, samples=args.samples,
102
+ probability_regular=args.probability_regular)
103
+ i += 1
104
+ print(f'Completed {bench.name} benchmark on {device} ({i} of {num_benchmarks})')
105
+
106
+ if args.output is not None:
107
+ with open(args.output, 'w') as f:
108
+ _output_csv(f, results)
109
+
110
+ compare = benchmark.Compare(results)
111
+ compare.trim_significant_figures()
112
+ compare.colorize()
113
+ compare.print()
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (205 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/binary.cpython-310.pyc ADDED
Binary file (2.89 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/sparse_binary.cpython-310.pyc ADDED
Binary file (2.69 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/sparse_unary.cpython-310.pyc ADDED
Binary file (2.44 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/spectral.cpython-310.pyc ADDED
Binary file (2.94 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/__pycache__/unary.cpython-310.pyc ADDED
Binary file (2.54 kB). View file
 
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/binary.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import torch
3
+
4
+ from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedTensor
5
+
6
+
7
+ _MIN_DIM_SIZE = 16
8
+ _MAX_DIM_SIZE = 16 * 1024 ** 2
9
+ _POW_TWO_SIZES = tuple(2 ** i for i in range(
10
+ int(np.log2(_MIN_DIM_SIZE)),
11
+ int(np.log2(_MAX_DIM_SIZE)) + 1,
12
+ ))
13
+
14
+
15
class BinaryOpFuzzer(Fuzzer):
    """Fuzzer producing random (x, y) dense tensor pairs for binary-op benchmarks.

    Each dimension size blends a loguniform draw over all integers in
    [_MIN_DIM_SIZE, _MAX_DIM_SIZE] with a draw restricted to powers of two
    (which often hit special-cased kernels). `y` occasionally gets singleton
    dimensions to exercise broadcasting, and both tensors are occasionally
    strided to benchmark non-contiguous memory access.
    """

    def __init__(self, seed, dtype=torch.float32, cuda=False):
        super().__init__(
            parameters=[
                # Dimensionality of x and y. (e.g. 1D, 2D, or 3D.)
                FuzzedParameter("dim", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True),

                # Shapes for `x` and `y`.
                #   It is important to test all shapes, however powers of two
                #   are especially important and therefore warrant special
                #   attention. This is done by generating both a value drawn
                #   from all integers between the min and max allowed values,
                #   and another from only the powers of two (both
                #   distributions are loguniform) and then randomly selecting
                #   between the two.
                # Moreover, `y` will occasionally have singleton dimensions in
                # order to test broadcasting.
                [
                    FuzzedParameter(
                        name=f"k_any_{i}",
                        minval=_MIN_DIM_SIZE,
                        maxval=_MAX_DIM_SIZE,
                        distribution="loguniform",
                    ) for i in range(3)
                ],
                [
                    FuzzedParameter(
                        name=f"k_pow2_{i}",
                        distribution={size: 1. / len(_POW_TWO_SIZES) for size in _POW_TWO_SIZES}
                    ) for i in range(3)
                ],
                [
                    FuzzedParameter(
                        name=f"k{i}",
                        distribution={
                            ParameterAlias(f"k_any_{i}"): 0.8,
                            ParameterAlias(f"k_pow2_{i}"): 0.2,
                        },
                        strict=True,
                    ) for i in range(3)
                ],

                # `y` aliases `x`'s size 80% of the time, and is a singleton
                # (broadcast) dimension the remaining 20%.
                [
                    FuzzedParameter(
                        name=f"y_k{i}",
                        distribution={
                            ParameterAlias(f"k{i}"): 0.8,
                            1: 0.2,
                        },
                        strict=True,
                    ) for i in range(3)
                ],

                # Steps for `x` and `y`. (Benchmarks strided memory access.)
                [
                    FuzzedParameter(
                        name=f"{name}_step_{i}",
                        distribution={1: 0.8, 2: 0.06, 4: 0.06, 8: 0.04, 16: 0.04},
                    )
                    for i in range(3)
                    for name in ("x", "y")
                ],

                # Repeatable entropy for downstream applications.
                FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform"),
            ],
            tensors=[
                FuzzedTensor(
                    name="x",
                    size=("k0", "k1", "k2"),
                    steps=("x_step_0", "x_step_1", "x_step_2"),
                    probability_contiguous=0.75,
                    min_elements=4 * 1024,
                    max_elements=32 * 1024 ** 2,
                    max_allocation_bytes=2 * 1024**3,  # 2 GB
                    dim_parameter="dim",
                    dtype=dtype,
                    cuda=cuda,
                ),
                FuzzedTensor(
                    name="y",
                    size=("y_k0", "y_k1", "y_k2"),
                    # FIX: use y's own step parameters. The parameter list
                    # above generates `y_step_{i}` for every axis, but the
                    # original passed x's steps here, leaving those parameters
                    # dead and y's strides永 coupled to x's.
                    steps=("y_step_0", "y_step_1", "y_step_2"),
                    probability_contiguous=0.75,
                    max_allocation_bytes=2 * 1024**3,  # 2 GB
                    dim_parameter="dim",
                    dtype=dtype,
                    cuda=cuda,
                ),
            ],
            seed=seed,
        )
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/sparse_binary.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import torch
3
+
4
+ from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedSparseTensor
5
+
6
+
7
+ _MIN_DIM_SIZE = 16
8
+ _MAX_DIM_SIZE = 16 * 1024 ** 2
9
+ _POW_TWO_SIZES = tuple(2 ** i for i in range(
10
+ int(np.log2(_MIN_DIM_SIZE)),
11
+ int(np.log2(_MAX_DIM_SIZE)) + 1,
12
+ ))
13
+
14
+
15
class BinaryOpSparseFuzzer(Fuzzer):
    """Fuzzer producing random sparse (x, y) operand pairs for binary-op benchmarks.

    `y` always aliases `x`'s shape (the `y_k{i}` parameters alias `k{i}` with
    probability 1.0), and both operands share `sparse_dim`, `density` and
    `coalesced`, so every draw is a valid sparse binary-op input.
    """

    def __init__(self, seed, dtype=torch.float32, cuda=False):
        # Number of dimensions (1D/2D/3D) and how many of them are sparse.
        structure_params = [
            FuzzedParameter("dim_parameter", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True),
            FuzzedParameter(
                name="sparse_dim",
                distribution={1: 0.4, 2: 0.4, 3: 0.2},
                strict=True
            ),
        ]

        # Per-axis sizes. All shapes matter, but powers of two often hit
        # special-cased kernels and warrant extra coverage: each axis blends a
        # loguniform draw over every integer size with a draw restricted to
        # powers of two, selected via an aliasing parameter.
        any_sizes = [
            FuzzedParameter(
                name=f"k_any_{axis}",
                minval=_MIN_DIM_SIZE,
                maxval=_MAX_DIM_SIZE,
                distribution="loguniform",
            )
            for axis in range(3)
        ]
        pow2_weight = 1. / len(_POW_TWO_SIZES)
        pow2_sizes = [
            FuzzedParameter(
                name=f"k_pow2_{axis}",
                distribution={size: pow2_weight for size in _POW_TWO_SIZES}
            )
            for axis in range(3)
        ]
        blended_sizes = [
            FuzzedParameter(
                name=f"k{axis}",
                distribution={
                    ParameterAlias(f"k_any_{axis}"): 0.8,
                    ParameterAlias(f"k_pow2_{axis}"): 0.2,
                },
                strict=True,
            )
            for axis in range(3)
        ]
        # `y` mirrors `x`'s shape exactly (no broadcasting for sparse ops).
        y_sizes = [
            FuzzedParameter(
                name=f"y_k{axis}",
                distribution={
                    ParameterAlias(f"k{axis}"): 1.0},
                strict=True,
            )
            for axis in range(3)
        ]

        # Sparsity controls shared by both operands.
        sparsity_params = [
            FuzzedParameter(
                name="density",
                distribution={0.1: 0.4, 0.05: 0.3, 0.01: 0.3},
            ),
            FuzzedParameter(
                name="coalesced",
                distribution={True: 0.5, False: 0.5},
            ),
        ]
        # Repeatable entropy for downstream applications.
        entropy = FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform")

        def _sparse_operand(tensor_name, size_names):
            # Both operands use identical settings apart from name and size keys.
            return FuzzedSparseTensor(
                name=tensor_name,
                size=size_names,
                dim_parameter="dim_parameter",
                sparse_dim="sparse_dim",
                density="density",
                coalesced="coalesced",
                min_elements=4 * 1024,
                max_elements=32 * 1024 ** 2,
                dtype=dtype,
                cuda=cuda,
            )

        # NOTE: parameter order is preserved from the original so that the
        # RNG stream (and thus the generated samples for a seed) is unchanged.
        super().__init__(
            parameters=structure_params + [any_sizes, pow2_sizes, blended_sizes, y_sizes] + sparsity_params + [entropy],
            tensors=[
                _sparse_operand("x", ("k0", "k1", "k2")),
                _sparse_operand("y", ("y_k0", "y_k1", "y_k2")),
            ],
            seed=seed,
        )
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/sparse_unary.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import numpy as np
3
+ import torch
4
+ from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedSparseTensor
5
+
6
+
7
+ _MIN_DIM_SIZE = 16
8
+ _MAX_DIM_SIZE = 16 * 1024 ** 2
9
+ _POW_TWO_SIZES = tuple(2 ** i for i in range(
10
+ int(np.log2(_MIN_DIM_SIZE)),
11
+ int(np.log2(_MAX_DIM_SIZE)) + 1,
12
+ ))
13
+
14
class UnaryOpSparseFuzzer(Fuzzer):
    """Fuzzer producing random sparse tensors `x` for unary-op benchmarks.

    Each axis size blends a loguniform draw over all integer sizes with a
    draw restricted to powers of two; sparsity is controlled by the `density`
    and `coalesced` parameters.
    """

    def __init__(self, seed, dtype=torch.float32, cuda=False):
        # Number of dimensions (1D/2D/3D) and how many of them are sparse.
        structure_params = [
            FuzzedParameter("dim_parameter", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True),
            FuzzedParameter(
                name="sparse_dim",
                distribution={1: 0.4, 2: 0.4, 3: 0.2},
                strict=True
            ),
        ]

        # Per-axis sizes. All shapes matter, but powers of two often hit
        # special-cased kernels and warrant extra coverage, so each axis picks
        # (via an aliasing parameter) between a loguniform draw over all
        # integers and a draw over powers of two only.
        any_sizes = [
            FuzzedParameter(
                name=f"k_any_{axis}",
                minval=_MIN_DIM_SIZE,
                maxval=_MAX_DIM_SIZE,
                distribution="loguniform",
            )
            for axis in range(3)
        ]
        pow2_weight = 1. / len(_POW_TWO_SIZES)
        pow2_sizes = [
            FuzzedParameter(
                name=f"k_pow2_{axis}",
                distribution={size: pow2_weight for size in _POW_TWO_SIZES}
            )
            for axis in range(3)
        ]
        blended_sizes = [
            FuzzedParameter(
                name=f"k{axis}",
                distribution={
                    ParameterAlias(f"k_any_{axis}"): 0.8,
                    ParameterAlias(f"k_pow2_{axis}"): 0.2,
                },
                strict=True,
            )
            for axis in range(3)
        ]

        # Sparsity controls.
        sparsity_params = [
            FuzzedParameter(
                name="density",
                distribution={0.1: 0.4, 0.05: 0.3, 0.01: 0.3},
            ),
            FuzzedParameter(
                name="coalesced",
                distribution={True: 0.5, False: 0.5},
            ),
        ]
        # Repeatable entropy for downstream applications.
        entropy = FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform")

        # NOTE: parameter order is preserved from the original so that the
        # RNG stream (and thus the generated samples for a seed) is unchanged.
        super().__init__(
            parameters=structure_params + [any_sizes, pow2_sizes, blended_sizes] + sparsity_params + [entropy],
            tensors=[
                FuzzedSparseTensor(
                    name="x",
                    size=("k0", "k1", "k2"),
                    dim_parameter="dim_parameter",
                    sparse_dim="sparse_dim",
                    min_elements=4 * 1024,
                    max_elements=32 * 1024 ** 2,
                    density="density",
                    coalesced="coalesced",
                    dtype=dtype,
                    cuda=cuda,
                ),
            ],
            seed=seed,
        )
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/spectral.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+
3
+ import torch
4
+ from torch.utils import benchmark
5
+ from torch.utils.benchmark import FuzzedParameter, FuzzedTensor, ParameterAlias
6
+
7
+
8
+ __all__ = ['SpectralOpFuzzer']
9
+
10
+ MIN_DIM_SIZE = 16
11
+ MAX_DIM_SIZE = 16 * 1024
12
+
13
+ def power_range(upper_bound, base):
14
+ return (base ** i for i in range(int(math.log(upper_bound, base)) + 1))
15
+
16
+ # List of regular numbers from MIN_DIM_SIZE to MAX_DIM_SIZE
17
+ # These numbers factorize into multiples of prime factors 2, 3, and 5 only
18
+ # and are usually the fastest in FFT implementations.
19
+ REGULAR_SIZES = []
20
+ for i in power_range(MAX_DIM_SIZE, 2):
21
+ for j in power_range(MAX_DIM_SIZE // i, 3):
22
+ ij = i * j
23
+ for k in power_range(MAX_DIM_SIZE // ij, 5):
24
+ ijk = ij * k
25
+ if ijk > MIN_DIM_SIZE:
26
+ REGULAR_SIZES.append(ijk)
27
+ REGULAR_SIZES.sort()
28
+
29
class SpectralOpFuzzer(benchmark.Fuzzer):
    """Fuzzer producing random n-dimensional inputs for spectral (FFT) benchmarks.

    ``probability_regular`` controls how often each dimension size is drawn
    from ``REGULAR_SIZES`` (products of 2, 3 and 5 — the fast paths of most
    FFT implementations) instead of an arbitrary loguniform size.
    """

    def __init__(self, *, seed: int, dtype=torch.float64,
                 cuda: bool = False, probability_regular: float = 1.0):
        # Dimensionality of x (1D/2D/3D).
        ndim_param = FuzzedParameter("ndim", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True)

        # Per-axis sizes. All shapes matter, but regular sizes are especially
        # important to the FFT, so each axis picks (via an aliasing parameter)
        # between a loguniform draw over all integers and a uniform draw over
        # the regular numbers.
        any_sizes = [
            FuzzedParameter(
                name=f"k_any_{axis}",
                minval=MIN_DIM_SIZE,
                maxval=MAX_DIM_SIZE,
                distribution="loguniform",
            )
            for axis in range(3)
        ]
        regular_weight = 1. / len(REGULAR_SIZES)
        regular_sizes = [
            FuzzedParameter(
                name=f"k_regular_{axis}",
                distribution={size: regular_weight for size in REGULAR_SIZES}
            )
            for axis in range(3)
        ]
        blended_sizes = [
            FuzzedParameter(
                name=f"k{axis}",
                distribution={
                    ParameterAlias(f"k_regular_{axis}"): probability_regular,
                    ParameterAlias(f"k_any_{axis}"): 1 - probability_regular,
                },
                strict=True,
            )
            for axis in range(3)
        ]

        # Steps for `x` (benchmarks strided memory access).
        step_params = [
            FuzzedParameter(
                name=f"step_{axis}",
                distribution={1: 0.8, 2: 0.06, 4: 0.06, 8: 0.04, 16: 0.04},
            )
            for axis in range(3)
        ]

        # NOTE: parameter order is preserved from the original so that the
        # RNG stream (and thus the generated samples for a seed) is unchanged.
        super().__init__(
            parameters=[ndim_param, any_sizes, regular_sizes, blended_sizes, step_params],
            tensors=[
                FuzzedTensor(
                    name="x",
                    size=("k0", "k1", "k2"),
                    steps=("step_0", "step_1", "step_2"),
                    probability_contiguous=0.75,
                    min_elements=4 * 1024,
                    max_elements=32 * 1024 ** 2,
                    max_allocation_bytes=2 * 1024**3,  # 2 GB
                    dim_parameter="ndim",
                    dtype=dtype,
                    cuda=cuda,
                ),
            ],
            seed=seed,
        )
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/op_fuzzers/unary.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import torch
3
+
4
+ from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedTensor
5
+
6
+
7
+ _MIN_DIM_SIZE = 16
8
+ _MAX_DIM_SIZE = 16 * 1024 ** 2
9
+ _POW_TWO_SIZES = tuple(2 ** i for i in range(
10
+ int(np.log2(_MIN_DIM_SIZE)),
11
+ int(np.log2(_MAX_DIM_SIZE)) + 1,
12
+ ))
13
+
14
+
15
class UnaryOpFuzzer(Fuzzer):
    """Fuzzer producing random dense tensors `x` for unary-op benchmarks.

    Each axis size blends a loguniform draw over all integer sizes with a
    draw restricted to powers of two, and `x` is occasionally strided to
    benchmark non-contiguous memory access.
    """

    def __init__(self, seed, dtype=torch.float32, cuda=False):
        # Dimensionality of x (1D/2D/3D).
        dim_param = FuzzedParameter("dim", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True)

        # Per-axis sizes. All shapes matter, but powers of two often hit
        # special-cased kernels and warrant extra coverage, so each axis picks
        # (via an aliasing parameter) between a loguniform draw over all
        # integers and a draw over powers of two only.
        any_sizes = [
            FuzzedParameter(
                name=f"k_any_{axis}",
                minval=_MIN_DIM_SIZE,
                maxval=_MAX_DIM_SIZE,
                distribution="loguniform",
            )
            for axis in range(3)
        ]
        pow2_weight = 1. / len(_POW_TWO_SIZES)
        pow2_sizes = [
            FuzzedParameter(
                name=f"k_pow2_{axis}",
                distribution={size: pow2_weight for size in _POW_TWO_SIZES}
            )
            for axis in range(3)
        ]
        blended_sizes = [
            FuzzedParameter(
                name=f"k{axis}",
                distribution={
                    ParameterAlias(f"k_any_{axis}"): 0.8,
                    ParameterAlias(f"k_pow2_{axis}"): 0.2,
                },
                strict=True,
            )
            for axis in range(3)
        ]

        # Steps for `x` (benchmarks strided memory access).
        step_params = [
            FuzzedParameter(
                name=f"x_step_{axis}",
                distribution={1: 0.8, 2: 0.06, 4: 0.06, 8: 0.04, 16: 0.04},
            )
            for axis in range(3)
        ]

        # Repeatable entropy for downstream applications.
        entropy = FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform")

        # NOTE: parameter order is preserved from the original so that the
        # RNG stream (and thus the generated samples for a seed) is unchanged.
        super().__init__(
            parameters=[dim_param, any_sizes, pow2_sizes, blended_sizes, step_params, entropy],
            tensors=[
                FuzzedTensor(
                    name="x",
                    size=("k0", "k1", "k2"),
                    steps=("x_step_0", "x_step_1", "x_step_2"),
                    probability_contiguous=0.75,
                    min_elements=4 * 1024,
                    max_elements=32 * 1024 ** 2,
                    max_allocation_bytes=2 * 1024**3,  # 2 GB
                    dim_parameter="dim",
                    dtype=dtype,
                    cuda=cuda,
                ),
            ],
            seed=seed,
        )
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/utils/__init__.py ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/torch/utils/benchmark/utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (200 Bytes). View file