Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- .gitattributes +1 -0
- llmeval-env/lib/python3.10/site-packages/nvidia_nvjitlink_cu12-12.4.127.dist-info/License.txt +1568 -0
- llmeval-env/lib/python3.10/site-packages/torch/_refs/fft.py +590 -0
- llmeval-env/lib/python3.10/site-packages/torch/_refs/nn/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/_refs/special/__init__.py +236 -0
- llmeval-env/lib/python3.10/site-packages/torch/_refs/special/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/amp/__pycache__/grad_scaler.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/bin/torch_shm_manager +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/compiler/__init__.py +193 -0
- llmeval-env/lib/python3.10/site-packages/torch/compiler/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so +3 -0
- llmeval-env/lib/python3.10/site-packages/torch/nested/__init__.py +253 -0
- llmeval-env/lib/python3.10/site-packages/torch/nested/_internal/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nested/_internal/nested_tensor.py +431 -0
- llmeval-env/lib/python3.10/site-packages/torch/nested/_internal/ops.py +1120 -0
- llmeval-env/lib/python3.10/site-packages/torch/nested/_internal/sdpa.py +780 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__init__.py +31 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/linear_relu.py +15 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/__init__.py +1 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/__init__.py +5 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/linear_relu.py +5 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/bn_relu.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/conv_relu.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/linear_relu.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/conv_relu.py +9 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__init__.py +68 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/_functions.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/activation.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/adaptive.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/batchnorm.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/channelshuffle.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/container.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/conv.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/distance.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/dropout.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/flatten.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/fold.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/instancenorm.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/lazy.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/linear.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/loss.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/module.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/normalization.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/padding.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/pixelshuffle.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/pooling.cpython-310.pyc +0 -0
.gitattributes
CHANGED
@@ -114,3 +114,4 @@ llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_cnn_train.so.
 llmeval-env/lib/python3.10/site-packages/torch/lib/libc10.so filter=lfs diff=lfs merge=lfs -text
 llmeval-env/lib/python3.10/site-packages/torch/lib/libtorch_python.so filter=lfs diff=lfs merge=lfs -text
 llmeval-env/lib/python3.10/site-packages/torch/lib/libcusparseLt-f80c68d1.so.0 filter=lfs diff=lfs merge=lfs -text
+llmeval-env/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so filter=lfs diff=lfs merge=lfs -text
llmeval-env/lib/python3.10/site-packages/nvidia_nvjitlink_cu12-12.4.127.dist-info/License.txt
ADDED
@@ -0,0 +1,1568 @@
End User License Agreement
--------------------------


Preface
-------

The Software License Agreement in Chapter 1 and the Supplement in Chapter 2 contain license terms and conditions that govern the use of NVIDIA software. By accepting this agreement, you agree to comply with all the terms and conditions applicable to the product(s) included herein.


NVIDIA Driver

Description

This package contains the operating system driver and fundamental system software components for NVIDIA GPUs.


NVIDIA CUDA Toolkit

Description

The NVIDIA CUDA Toolkit provides command-line and graphical tools for building, debugging and optimizing the performance of applications accelerated by NVIDIA GPUs, runtime and math libraries, and documentation including programming guides, user manuals, and API references.

Default Install Location of CUDA Toolkit

Windows platform:

  %ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.#

Linux platform:

  /usr/local/cuda-#.#

Mac platform:

  /Developer/NVIDIA/CUDA-#.#


NVIDIA CUDA Samples

Description

This package includes over 100 CUDA examples that demonstrate various CUDA programming principles, and efficient CUDA implementation of algorithms in specific application domains.

Default Install Location of CUDA Samples

Windows platform:

  %ProgramData%\NVIDIA Corporation\CUDA Samples\v#.#

Linux platform:

  /usr/local/cuda-#.#/samples

  and

  $HOME/NVIDIA_CUDA-#.#_Samples

Mac platform:

  /Developer/NVIDIA/CUDA-#.#/samples


NVIDIA Nsight Visual Studio Edition (Windows only)

Description

NVIDIA Nsight Development Platform, Visual Studio Edition is a development environment integrated into Microsoft Visual Studio that provides tools for debugging, profiling, analyzing and optimizing your GPU computing and graphics applications.

Default Install Location of Nsight Visual Studio Edition

Windows platform:

  %ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.#


1. License Agreement for NVIDIA Software Development Kits
---------------------------------------------------------

Release Date: July 26, 2018
---------------------------

Important Notice - Read before downloading, installing, copying or using the licensed software:
-------------------------------------------------------

This license agreement, including exhibits attached ("Agreement") is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs your use of a NVIDIA software development kit ("SDK").

Each SDK has its own set of software and materials, but here is a description of the types of items that may be included in a SDK: source code, header files, APIs, data sets and assets (examples include images, textures, models, scenes, videos, native API input/output files), binary software, sample code, libraries, utility programs, programming code and documentation.

This Agreement can be accepted only by an adult of legal age of majority in the country in which the SDK is used.

If you are entering into this Agreement on behalf of a company or other legal entity, you represent that you have the legal authority to bind the entity to this Agreement, in which case "you" will mean the entity you represent.

If you don't have the required age or authority to accept this Agreement, or if you don't accept all the terms and conditions of this Agreement, do not download, install or use the SDK.

You agree to use the SDK only for purposes that are permitted by (a) this Agreement, and (b) any applicable law, regulation or generally accepted practices or guidelines in the relevant jurisdictions.


1.1. License

1.1.1. License Grant

Subject to the terms of this Agreement, NVIDIA hereby grants you a non-exclusive, non-transferable license, without the right to sublicense (except as expressly provided in this Agreement) to:

1. Install and use the SDK,

2. Modify and create derivative works of sample source code delivered in the SDK, and

3. Distribute those portions of the SDK that are identified in this Agreement as distributable, as incorporated in object code format into a software application that meets the distribution requirements indicated in this Agreement.

1.1.2. Distribution Requirements

These are the distribution requirements for you to exercise the distribution grant:

1. Your application must have material additional functionality, beyond the included portions of the SDK.

2. The distributable portions of the SDK shall only be accessed by your application.

3. The following notice shall be included in modifications and derivative works of sample source code distributed: "This software contains source code provided by NVIDIA Corporation."

4. Unless a developer tool is identified in this Agreement as distributable, it is delivered for your internal use only.

5. The terms under which you distribute your application must be consistent with the terms of this Agreement, including (without limitation) terms relating to the license grant and license restrictions and protection of NVIDIA's intellectual property rights. Additionally, you agree that you will protect the privacy, security and legal rights of your application users.

6. You agree to notify NVIDIA in writing of any known or suspected distribution or use of the SDK not in compliance with the requirements of this Agreement, and to enforce the terms of your agreements with respect to distributed SDK.

1.1.3. Authorized Users

You may allow employees and contractors of your entity or of your subsidiary(ies) to access and use the SDK from your secure network to perform work on your behalf.

If you are an academic institution you may allow users enrolled or employed by the academic institution to access and use the SDK from your secure network.

You are responsible for the compliance with the terms of this Agreement by your authorized users. If you become aware that your authorized users didn't follow the terms of this Agreement, you agree to take reasonable steps to resolve the non-compliance and prevent new occurrences.

1.1.4. Pre-Release SDK

The SDK versions identified as alpha, beta, preview or otherwise as pre-release, may not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, accessibility, availability, and reliability standards relative to commercial versions of NVIDIA software and materials. Use of a pre-release SDK may result in unexpected results, loss of data, project delays or other unpredictable damage or loss.

You may use a pre-release SDK at your own risk, understanding that pre-release SDKs are not intended for use in production or business-critical systems.

NVIDIA may choose not to make available a commercial version of any pre-release SDK. NVIDIA may also choose to abandon development and terminate the availability of a pre-release SDK at any time without liability.

1.1.5. Updates

NVIDIA may, at its option, make available patches, workarounds or other updates to this SDK. Unless the updates are provided with their separate governing terms, they are deemed part of the SDK licensed to you as provided in this Agreement. You agree that the form and content of the SDK that NVIDIA provides may change without prior notice to you. While NVIDIA generally maintains compatibility between versions, NVIDIA may in some cases make changes that introduce incompatibilities in future versions of the SDK.

1.1.6. Third Party Licenses

The SDK may come bundled with, or otherwise include or be distributed with, third party software licensed by a NVIDIA supplier and/or open source software provided under an open source license. Use of third party software is subject to the third-party license terms, or in the absence of third party terms, the terms of this Agreement. Copyright to third party software is held by the copyright holders indicated in the third-party software or license.

1.1.7. Reservation of Rights

NVIDIA reserves all rights, title, and interest in and to the SDK, not expressly granted to you under this Agreement.


1.2. Limitations

The following license limitations apply to your use of the SDK:

1. You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary notices from any portion of the SDK or copies of the SDK.

2. Except as expressly provided in this Agreement, you may not copy, sell, rent, sublicense, transfer, distribute, modify, or create derivative works of any portion of the SDK. For clarity, you may not distribute or sublicense the SDK as a stand-alone product.

3. Unless you have an agreement with NVIDIA for this purpose, you may not indicate that an application created with the SDK is sponsored or endorsed by NVIDIA.

4. You may not bypass, disable, or circumvent any encryption, security, digital rights management or authentication mechanism in the SDK.

5. You may not use the SDK in any manner that would cause it to become subject to an open source software license. As examples, licenses that require as a condition of use, modification, and/or distribution that the SDK be:

   a. Disclosed or distributed in source code form;

   b. Licensed for the purpose of making derivative works; or

   c. Redistributable at no charge.

6. Unless you have an agreement with NVIDIA for this purpose, you may not use the SDK with any system or application where the use or failure of the system or application can reasonably be expected to threaten or result in personal injury, death, or catastrophic loss. Examples include use in avionics, navigation, military, medical, life support or other life critical applications. NVIDIA does not design, test or manufacture the SDK for these critical uses and NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or damages arising from such uses.

7. You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, and their respective employees, contractors, agents, officers and directors, from and against any and all claims, damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited to attorney's fees and costs incident to establishing the right of indemnification) arising out of or related to your use of the SDK outside of the scope of this Agreement, or not in compliance with its terms.


1.3. Ownership

1. NVIDIA or its licensors hold all rights, title and interest in and to the SDK and its modifications and derivative works, including their respective intellectual property rights, subject to your rights described in this section. This SDK may include software and materials from NVIDIA's licensors, and these licensors are intended third party beneficiaries that may enforce this Agreement with respect to their intellectual property rights.

2. You hold all rights, title and interest in and to your applications and your derivative works of the sample source code delivered in the SDK, including their respective intellectual property rights, subject to NVIDIA's rights described in this section.

3. You may, but don't have to, provide to NVIDIA suggestions, feature requests or other feedback regarding the SDK, including possible enhancements or modifications to the SDK. For any feedback that you voluntarily provide, you hereby grant NVIDIA and its affiliates a perpetual, non-exclusive, worldwide, irrevocable license to use, reproduce, modify, license, sublicense (through multiple tiers of sublicensees), and distribute (through multiple tiers of distributors) it without the payment of any royalties or fees to you. NVIDIA will use feedback at its choice. NVIDIA is constantly looking for ways to improve its products, so you may send feedback to NVIDIA through the developer portal at https://developer.nvidia.com.


1.4. No Warranties

THE SDK IS PROVIDED BY NVIDIA "AS IS" AND "WITH ALL FAULTS." TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF DEALING OR COURSE OF TRADE.


1.5. Limitation of Liability

TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF LIABILITY. IN NO EVENT WILL NVIDIA'S AND ITS AFFILIATES TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS LIMIT.

These exclusions and limitations of liability shall apply regardless if NVIDIA or its affiliates have been advised of the possibility of such damages, and regardless of whether a remedy fails its essential purpose. These exclusions and limitations of liability form an essential basis of the bargain between the parties, and, absent any of these exclusions or limitations of liability, the provisions of this Agreement, including, without limitation, the economic terms, would be substantially different.


1.6. Termination

1. This Agreement will continue to apply until terminated by either you or NVIDIA as described below.

2. If you want to terminate this Agreement, you may do so by stopping to use the SDK.

3. NVIDIA may, at any time, terminate this Agreement if:

   a. (i) you fail to comply with any term of this Agreement and the non-compliance is not fixed within thirty (30) days following notice from NVIDIA (or immediately if you violate NVIDIA's intellectual property rights);

   b. (ii) you commence or participate in any legal proceeding against NVIDIA with respect to the SDK; or

   c. (iii) NVIDIA decides to no longer provide the SDK in a country or, in NVIDIA's sole discretion, the continued use of it is no longer commercially viable.

4. Upon any termination of this Agreement, you agree to promptly discontinue use of the SDK and destroy all copies in your possession or control. Your prior distributions in accordance with this Agreement are not affected by the termination of this Agreement. Upon written request, you will certify in writing that you have complied with your commitments under this section. Upon any termination of this Agreement all provisions survive except for the license grant provisions.


1.7. General

If you wish to assign this Agreement or your rights and obligations, including by merger, consolidation, dissolution or operation of law, contact NVIDIA to ask for permission. Any attempted assignment not approved by NVIDIA in writing shall be void and of no effect. NVIDIA may assign, delegate or transfer this Agreement and its rights and obligations, and if to a non-affiliate you will be notified.

You agree to cooperate with NVIDIA and provide reasonably requested information to verify your compliance with this Agreement.

This Agreement will be governed in all respects by the laws of the United States and of the State of Delaware as those laws are applied to contracts entered into and performed entirely within Delaware by Delaware residents, without regard to the conflicts of laws principles. The United Nations Convention on Contracts for the International Sale of Goods is specifically disclaimed. You agree to all terms of this Agreement in the English language.

The state or federal courts residing in Santa Clara County, California shall have exclusive jurisdiction over any dispute or claim arising out of this Agreement. Notwithstanding this, you agree that NVIDIA shall still be allowed to apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction.

If any court of competent jurisdiction determines that any provision of this Agreement is illegal, invalid or unenforceable, such provision will be construed as limited to the extent necessary to be consistent with and fully enforceable under the law and the remaining provisions will remain in full force and effect. Unless otherwise specified, remedies are cumulative.

Each party acknowledges and agrees that the other is an independent contractor in the performance of this Agreement.

The SDK has been developed entirely at private expense and is "commercial items" consisting of "commercial computer software" and "commercial computer software documentation" provided with RESTRICTED RIGHTS. Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the restrictions in this Agreement pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (c)(1) and (2) of the Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas Expressway, Santa Clara, CA 95051.

The SDK is subject to United States export laws and regulations. You agree that you will not ship, transfer or export the SDK into any country, or use the SDK in any manner, prohibited by the United States Bureau of Industry and Security or economic sanctions regulations administered by the U.S. Department of Treasury's Office of Foreign Assets Control (OFAC), or any applicable export laws, restrictions or regulations. These laws include restrictions on destinations, end users and end use. By accepting this Agreement, you confirm that you are not a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from receiving the SDK.

Any notice delivered by NVIDIA to you under this Agreement will be delivered via mail, email or fax. You agree that any notices that NVIDIA sends you electronically will satisfy any legal communication requirements. Please direct your legal notices or other correspondence to NVIDIA Corporation, 2788 San Tomas Expressway, Santa Clara, California 95051, United States of America, Attention: Legal Department.

This Agreement and any exhibits incorporated into this Agreement constitute the entire agreement of the parties with respect to the subject matter of this Agreement and supersede all prior negotiations or documentation exchanged between the parties relating to this SDK license. Any additional and/or conflicting terms on documents issued by you are null, void, and invalid. Any amendment or waiver under this Agreement shall be in writing and signed by representatives of both parties.


2. CUDA Toolkit Supplement to Software License Agreement for NVIDIA Software Development Kits
------------------------------------------------------------

Release date: August 16, 2018
-----------------------------

The terms in this supplement govern your use of the NVIDIA CUDA Toolkit SDK under the terms of your license agreement ("Agreement") as modified by this supplement. Capitalized terms used but not defined below have the meaning assigned to them in the Agreement.

This supplement is an exhibit to the Agreement and is incorporated as an integral part of the Agreement. In the event of conflict between the terms in this supplement and the terms in the Agreement, the terms in this supplement govern.


2.1. License Scope

The SDK is licensed for you to develop applications only for use in systems with NVIDIA GPUs.


2.2. Distribution

The portions of the SDK that are distributable under the Agreement are listed in Attachment A.


2.3. Operating Systems

Those portions of the SDK designed exclusively for use on the Linux or FreeBSD operating systems, or other operating systems derived from the source code to these operating systems, may be copied and redistributed for use in accordance with this Agreement, provided that the object code files are not modified in any way (except for unzipping of compressed files).


2.4. Audio and Video Encoders and Decoders

You acknowledge and agree that it is your sole responsibility to obtain any additional third-party licenses required to make, have made, use, have used, sell, import, and offer for sale your products or services that include or incorporate any third-party software and content relating to audio and/or video encoders and decoders from, including but not limited to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A., MPEG-LA, and Coding Technologies. NVIDIA does not grant to you under this Agreement any necessary patent or other rights with respect to any audio and/or video encoders and decoders.


2.5. Licensing

If the distribution terms in this Agreement are not suitable for your organization, or for any questions regarding this Agreement, please contact NVIDIA at


2.6. Attachment A

The following portions of the SDK are distributable under the Agreement:

Component: CUDA Runtime
  Windows: cudart.dll, cudart_static.lib, cudadevrt.lib
  Mac OSX: libcudart.dylib, libcudart_static.a, libcudadevrt.a
  Linux:   libcudart.so, libcudart_static.a, libcudadevrt.a
  Android: libcudart.so, libcudart_static.a, libcudadevrt.a

Component: CUDA FFT Library
  Windows: cufft.dll, cufftw.dll, cufft.lib, cufftw.lib
  Mac OSX: libcufft.dylib, libcufft_static.a, libcufftw.dylib, libcufftw_static.a
  Linux:   libcufft.so, libcufft_static.a, libcufftw.so, libcufftw_static.a
  Android: libcufft.so, libcufft_static.a, libcufftw.so, libcufftw_static.a

Component: CUDA BLAS Library
  Windows: cublas.dll, cublasLt.dll
  Mac OSX: libcublas.dylib, libcublasLt.dylib, libcublas_static.a, libcublasLt_static.a
  Linux:   libcublas.so, libcublasLt.so, libcublas_static.a, libcublasLt_static.a
  Android: libcublas.so, libcublasLt.so, libcublas_static.a, libcublasLt_static.a

Component: NVIDIA "Drop-in" BLAS Library
  Windows: nvblas.dll
  Mac OSX: libnvblas.dylib
  Linux:   libnvblas.so

Component: CUDA Sparse Matrix Library
  Windows: cusparse.dll, cusparse.lib
  Mac OSX: libcusparse.dylib, libcusparse_static.a
  Linux:   libcusparse.so, libcusparse_static.a
  Android: libcusparse.so, libcusparse_static.a

Component: CUDA Linear Solver Library
  Windows: cusolver.dll, cusolver.lib
  Mac OSX: libcusolver.dylib, libcusolver_static.a
  Linux:   libcusolver.so, libcusolver_static.a
  Android: libcusolver.so, libcusolver_static.a

Component: CUDA Random Number Generation Library
  Windows: curand.dll, curand.lib
  Mac OSX: libcurand.dylib, libcurand_static.a
  Linux:   libcurand.so, libcurand_static.a
  Android: libcurand.so, libcurand_static.a

Component: CUDA Accelerated Graph Library

Component: NVIDIA Performance Primitives Library
  Windows: nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll, nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll, nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib, nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll, nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib
  Mac OSX: libnppc.dylib, libnppc_static.a, libnppial.dylib, libnppial_static.a, libnppicc.dylib, libnppicc_static.a, libnppicom.dylib, libnppicom_static.a, libnppidei.dylib, libnppidei_static.a, libnppif.dylib, libnppif_static.a, libnppig.dylib, libnppig_static.a, libnppim.dylib, libnppisu_static.a, libnppitc.dylib, libnppitc_static.a, libnpps.dylib, libnpps_static.a
  Linux:   libnppc.so, libnppc_static.a, libnppial.so, libnppial_static.a, libnppicc.so, libnppicc_static.a, libnppicom.so, libnppicom_static.a, libnppidei.so, libnppidei_static.a, libnppif.so, libnppif_static.a, libnppig.so, libnppig_static.a, libnppim.so, libnppim_static.a, libnppist.so, libnppist_static.a, libnppisu.so, libnppisu_static.a, libnppitc.so, libnppitc_static.a, libnpps.so, libnpps_static.a
  Android: libnppc.so, libnppc_static.a, libnppial.so, libnppial_static.a, libnppicc.so, libnppicc_static.a, libnppicom.so, libnppicom_static.a, libnppidei.so, libnppidei_static.a, libnppif.so, libnppif_static.a, libnppig.so, libnppig_static.a, libnppim.so, libnppim_static.a, libnppist.so, libnppist_static.a, libnppisu.so, libnppisu_static.a, libnppitc.so, libnppitc_static.a, libnpps.so, libnpps_static.a

Component: NVIDIA JPEG Library
  Linux:   libnvjpeg.so, libnvjpeg_static.a

Component: Internal common library required for statically linking to cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP
  Mac OSX: libculibos.a
  Linux:   libculibos.a

Component: NVIDIA Runtime Compilation Library and Header
  All:     nvrtc.h
  Windows: nvrtc.dll, nvrtc-builtins.dll
  Mac OSX: libnvrtc.dylib, libnvrtc-builtins.dylib
  Linux:   libnvrtc.so, libnvrtc-builtins.so

Component: NVIDIA Optimizing Compiler Library
  Windows: nvvm.dll
  Mac OSX: libnvvm.dylib
  Linux:   libnvvm.so

Component: NVIDIA Common Device Math Functions Library
  Windows: libdevice.10.bc
  Mac OSX: libdevice.10.bc
  Linux:   libdevice.10.bc

Component: CUDA Occupancy Calculation Header Library
  All:     cuda_occupancy.h

Component: CUDA Half Precision Headers
  All:     cuda_fp16.h, cuda_fp16.hpp

Component: CUDA Profiling Tools Interface (CUPTI) Library
  Windows: cupti.dll
  Mac OSX: libcupti.dylib
  Linux:   libcupti.so

Component: NVIDIA Tools Extension Library
  Windows: nvToolsExt.dll, nvToolsExt.lib
  Mac OSX: libnvToolsExt.dylib
  Linux:   libnvToolsExt.so

Component: NVIDIA CUDA Driver Libraries
  Linux:   libcuda.so, libnvidia-fatbinaryloader.so, libnvidia-ptxjitcompiler.so

The NVIDIA CUDA Driver Libraries are only distributable in applications that meet these criteria:

1. The application was developed starting from a NVIDIA CUDA container obtained from Docker Hub or the NVIDIA GPU Cloud, and

2. The resulting application is packaged as a Docker container and distributed to users on Docker Hub or the NVIDIA GPU Cloud only.


2.7. Attachment B

Additional Licensing Obligations

The following third party components included in the SOFTWARE are licensed to Licensee pursuant to the following terms and conditions:

1. Licensee's use of the GDB third party component is subject to the terms and conditions of GNU GPL v3:

   This product includes copyrighted third-party software licensed under the terms of the GNU General Public License v3 ("GPL v3"). All third-party software packages are copyright by their respective authors. GPL v3 terms and conditions are hereby incorporated into the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt

   Consistent with these licensing requirements, the software listed below is provided under the terms of the specified open source software licenses. To obtain source code for software provided under licenses that require redistribution of source code, including the GNU General Public License (GPL) and GNU Lesser General Public License (LGPL), contact [email protected]. This offer is valid for a period of three (3) years from the date of the distribution of this product by NVIDIA CORPORATION.

   Component    License
   CUDA-GDB     GPL v3

2. Licensee represents and warrants that any and all third party licensing and/or royalty payment obligations in connection with Licensee's use of the H.264 video codecs are solely the responsibility of Licensee.

3. Licensee's use of the Thrust library is subject to the terms and conditions of the Apache License Version 2.0. All third-party software packages are copyright by their respective authors. Apache License Version 2.0 terms and conditions are hereby incorporated into the Agreement by this reference. http://www.apache.org/licenses/LICENSE-2.0.html

   In addition, Licensee acknowledges the following notice: Thrust includes source code from the Boost Iterator, Tuple, System, and Random Number libraries.

   Boost Software License - Version 1.0 - August 17th, 2003
   . . . .

   Permission is hereby granted, free of charge, to any person or organization obtaining a copy of the software and accompanying documentation covered by this license (the "Software") to use, reproduce, display, distribute, execute, and transmit the Software, and to prepare derivative works of the Software, and to permit third-parties to whom the Software is furnished to do so, all subject to the following:

   The copyright notices in the Software and this entire statement, including the above license grant, this restriction and the following disclaimer, must be included in all copies of the Software, in whole or in part, and all derivative works of the Software, unless such copies or derivative works are solely in the form of machine-executable object code generated by a source language processor.

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

4. Licensee's use of the LLVM third party component is subject to the following terms and conditions:

   ======================================================
   LLVM Release License
   ======================================================
   University of Illinois/NCSA
   Open Source License

   Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign.
   All rights reserved.

   Developed by:

       LLVM Team

       University of Illinois at Urbana-Champaign

       http://llvm.org

   Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal with the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

   * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimers.

   * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimers in the documentation and/or other materials provided with the distribution.

   * Neither the names of the LLVM Team, University of Illinois at Urbana-Champaign, nor the names of its contributors may be used to endorse or promote products derived from this Software without specific prior written permission.

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.

5. Licensee's use (e.g. nvprof) of the PCRE third party component is subject to the following terms and conditions:

   ------------
   PCRE LICENCE
   ------------
   PCRE is a library of functions to support regular expressions whose syntax and semantics are as close as possible to those of the Perl 5 language. Release 8 of PCRE is distributed under the terms of the "BSD" licence, as specified below. The documentation for PCRE, supplied in the "doc" directory, is distributed under the same terms as the software itself. The basic library functions are written in C and are freestanding. Also included in the distribution is a set of C++ wrapper functions, and a just-in-time compiler that can be used to optimize pattern matching. These are both optional features that can be omitted when the library is built.

   THE BASIC LIBRARY FUNCTIONS
   ---------------------------
   Written by:       Philip Hazel
   Email local part: ph10
   Email domain:     cam.ac.uk
   University of Cambridge Computing Service,
   Cambridge, England.
   Copyright (c) 1997-2012 University of Cambridge
   All rights reserved.

   PCRE JUST-IN-TIME COMPILATION SUPPORT
   -------------------------------------
   Written by:       Zoltan Herczeg
   Email local part: hzmester
   Email domain:     freemail.hu
   Copyright (c) 2010-2012 Zoltan Herczeg
   All rights reserved.

   STACK-LESS JUST-IN-TIME COMPILER
   --------------------------------
   Written by:       Zoltan Herczeg
   Email local part: hzmester
   Email domain:     freemail.hu
   Copyright (c) 2009-2012 Zoltan Herczeg
   All rights reserved.

   THE C++ WRAPPER FUNCTIONS
   -------------------------
   Contributed by:   Google Inc.
   Copyright (c) 2007-2012, Google Inc.
   All rights reserved.

   THE "BSD" LICENCE
   -----------------
   Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

   * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

   * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

   * Neither the name of the University of Cambridge nor the name of Google Inc. nor the names of their contributors may be used to endorse or promote products derived from this software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

6. Some of the cuBLAS library routines were written by or derived from code written by Vasily Volkov and are subject to the Modified Berkeley Software Distribution License as follows:

   Copyright (c) 2007-2009, Regents of the University of California

   All rights reserved.

   Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

   * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

   * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

   * Neither the name of the University of California, Berkeley nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

7. Some of the cuBLAS library routines were written by or derived from code written by Davide Barbieri and are subject to the Modified Berkeley Software Distribution License as follows:

   Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata.

   All rights reserved.

   Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

   * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

   * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

   * The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission.
|
1175 |
+
written permission.
|
1176 |
+
|
1177 |
+
THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
|
1178 |
+
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
1179 |
+
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
1180 |
+
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
|
1181 |
+
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
1182 |
+
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
1183 |
+
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
1184 |
+
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
|
1185 |
+
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
|
1186 |
+
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
1187 |
+
POSSIBILITY OF SUCH DAMAGE.
|
1188 |
+
|
1189 |
+
8. Some of the cuBLAS library routines were derived from
|
1190 |
+
code developed by the University of Tennessee and are
|
1191 |
+
subject to the Modified Berkeley Software Distribution
|
1192 |
+
License as follows:
|
1193 |
+
|
1194 |
+
Copyright (c) 2010 The University of Tennessee.
|
1195 |
+
|
1196 |
+
All rights reserved.
|
1197 |
+
|
1198 |
+
Redistribution and use in source and binary forms, with or without
|
1199 |
+
modification, are permitted provided that the following conditions are
|
1200 |
+
met:
|
1201 |
+
* Redistributions of source code must retain the above copyright
|
1202 |
+
notice, this list of conditions and the following disclaimer.
|
1203 |
+
* Redistributions in binary form must reproduce the above
|
1204 |
+
copyright notice, this list of conditions and the following
|
1205 |
+
disclaimer listed in this license in the documentation and/or
|
1206 |
+
other materials provided with the distribution.
|
1207 |
+
* Neither the name of the copyright holders nor the names of its
|
1208 |
+
contributors may be used to endorse or promote products derived
|
1209 |
+
from this software without specific prior written permission.
|
1210 |
+
|
1211 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
1212 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
1213 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
1214 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
1215 |
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
1216 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
1217 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
1218 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
1219 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
1220 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
1221 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
1222 |
+
|
1223 |
+
9. Some of the cuBLAS library routines were written by or
|
1224 |
+
derived from code written by Jonathan Hogg and are subject
|
1225 |
+
to the Modified Berkeley Software Distribution License as
|
1226 |
+
follows:
|
1227 |
+
|
1228 |
+
Copyright (c) 2012, The Science and Technology Facilities Council (STFC).
|
1229 |
+
|
1230 |
+
All rights reserved.
|
1231 |
+
|
1232 |
+
Redistribution and use in source and binary forms, with or without
|
1233 |
+
modification, are permitted provided that the following conditions are
|
1234 |
+
met:
|
1235 |
+
* Redistributions of source code must retain the above copyright
|
1236 |
+
notice, this list of conditions and the following disclaimer.
|
1237 |
+
* Redistributions in binary form must reproduce the above
|
1238 |
+
copyright notice, this list of conditions and the following
|
1239 |
+
disclaimer in the documentation and/or other materials provided
|
1240 |
+
with the distribution.
|
1241 |
+
* Neither the name of the STFC nor the names of its contributors
|
1242 |
+
may be used to endorse or promote products derived from this
|
1243 |
+
software without specific prior written permission.
|
1244 |
+
|
1245 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
1246 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
1247 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
1248 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE STFC BE
|
1249 |
+
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
1250 |
+
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
1251 |
+
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
|
1252 |
+
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
1253 |
+
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
1254 |
+
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
|
1255 |
+
IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
1256 |
+
|
1257 |
+
10. Some of the cuBLAS library routines were written by or
|
1258 |
+
derived from code written by Ahmad M. Abdelfattah, David
|
1259 |
+
Keyes, and Hatem Ltaief, and are subject to the Apache
|
1260 |
+
License, Version 2.0, as follows:
|
1261 |
+
|
1262 |
+
-- (C) Copyright 2013 King Abdullah University of Science and Technology
|
1263 |
+
Authors:
|
1264 |
+
Ahmad Abdelfattah ([email protected])
|
1265 |
+
David Keyes ([email protected])
|
1266 |
+
Hatem Ltaief ([email protected])
|
1267 |
+
|
1268 |
+
Redistribution and use in source and binary forms, with or without
|
1269 |
+
modification, are permitted provided that the following conditions
|
1270 |
+
are met:
|
1271 |
+
|
1272 |
+
* Redistributions of source code must retain the above copyright
|
1273 |
+
notice, this list of conditions and the following disclaimer.
|
1274 |
+
* Redistributions in binary form must reproduce the above copyright
|
1275 |
+
notice, this list of conditions and the following disclaimer in the
|
1276 |
+
documentation and/or other materials provided with the distribution.
|
1277 |
+
* Neither the name of the King Abdullah University of Science and
|
1278 |
+
Technology nor the names of its contributors may be used to endorse
|
1279 |
+
or promote products derived from this software without specific prior
|
1280 |
+
written permission.
|
1281 |
+
|
1282 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
1283 |
+
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
1284 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
1285 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
1286 |
+
HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
1287 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
1288 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
1289 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
1290 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
1291 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
1292 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
|
1293 |
+
|
1294 |
+
11. Some of the cuSPARSE library routines were written by or
|
1295 |
+
derived from code written by Li-Wen Chang and are subject
|
1296 |
+
to the NCSA Open Source License as follows:
|
1297 |
+
|
1298 |
+
Copyright (c) 2012, University of Illinois.
|
1299 |
+
|
1300 |
+
All rights reserved.
|
1301 |
+
|
1302 |
+
Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu
|
1303 |
+
|
1304 |
+
Permission is hereby granted, free of charge, to any person obtaining
|
1305 |
+
a copy of this software and associated documentation files (the
|
1306 |
+
"Software"), to deal with the Software without restriction, including
|
1307 |
+
without limitation the rights to use, copy, modify, merge, publish,
|
1308 |
+
distribute, sublicense, and/or sell copies of the Software, and to
|
1309 |
+
permit persons to whom the Software is furnished to do so, subject to
|
1310 |
+
the following conditions:
|
1311 |
+
* Redistributions of source code must retain the above copyright
|
1312 |
+
notice, this list of conditions and the following disclaimer.
|
1313 |
+
* Redistributions in binary form must reproduce the above
|
1314 |
+
copyright notice, this list of conditions and the following
|
1315 |
+
disclaimers in the documentation and/or other materials provided
|
1316 |
+
with the distribution.
|
1317 |
+
* Neither the names of IMPACT Group, University of Illinois, nor
|
1318 |
+
the names of its contributors may be used to endorse or promote
|
1319 |
+
products derived from this Software without specific prior
|
1320 |
+
written permission.
|
1321 |
+
|
1322 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
1323 |
+
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
1324 |
+
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
1325 |
+
NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
|
1326 |
+
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
1327 |
+
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
1328 |
+
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
|
1329 |
+
SOFTWARE.
|
1330 |
+
|
1331 |
+
12. Some of the cuRAND library routines were written by or
|
1332 |
+
derived from code written by Mutsuo Saito and Makoto
|
1333 |
+
Matsumoto and are subject to the following license:
|
1334 |
+
|
1335 |
+
Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
|
1336 |
+
University. All rights reserved.
|
1337 |
+
|
1338 |
+
Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
|
1339 |
+
University and University of Tokyo. All rights reserved.
|
1340 |
+
|
1341 |
+
Redistribution and use in source and binary forms, with or without
|
1342 |
+
modification, are permitted provided that the following conditions are
|
1343 |
+
met:
|
1344 |
+
* Redistributions of source code must retain the above copyright
|
1345 |
+
notice, this list of conditions and the following disclaimer.
|
1346 |
+
* Redistributions in binary form must reproduce the above
|
1347 |
+
copyright notice, this list of conditions and the following
|
1348 |
+
disclaimer in the documentation and/or other materials provided
|
1349 |
+
with the distribution.
|
1350 |
+
* Neither the name of the Hiroshima University nor the names of
|
1351 |
+
its contributors may be used to endorse or promote products
|
1352 |
+
derived from this software without specific prior written
|
1353 |
+
permission.
|
1354 |
+
|
1355 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
1356 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
1357 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
1358 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
1359 |
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
1360 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
1361 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
1362 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
1363 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
1364 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
1365 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
1366 |
+
|
1367 |
+
13. Some of the cuRAND library routines were derived from
|
1368 |
+
code developed by D. E. Shaw Research and are subject to
|
1369 |
+
the following license:
|
1370 |
+
|
1371 |
+
Copyright 2010-2011, D. E. Shaw Research.
|
1372 |
+
|
1373 |
+
All rights reserved.
|
1374 |
+
|
1375 |
+
Redistribution and use in source and binary forms, with or without
|
1376 |
+
modification, are permitted provided that the following conditions are
|
1377 |
+
met:
|
1378 |
+
* Redistributions of source code must retain the above copyright
|
1379 |
+
notice, this list of conditions, and the following disclaimer.
|
1380 |
+
* Redistributions in binary form must reproduce the above
|
1381 |
+
copyright notice, this list of conditions, and the following
|
1382 |
+
disclaimer in the documentation and/or other materials provided
|
1383 |
+
with the distribution.
|
1384 |
+
* Neither the name of D. E. Shaw Research nor the names of its
|
1385 |
+
contributors may be used to endorse or promote products derived
|
1386 |
+
from this software without specific prior written permission.
|
1387 |
+
|
1388 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
1389 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
1390 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
1391 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
1392 |
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
1393 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
1394 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
1395 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
1396 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
1397 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
1398 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
1399 |
+
|
1400 |
+
14. Some of the Math library routines were written by or
|
1401 |
+
derived from code developed by Norbert Juffa and are
|
1402 |
+
subject to the following license:
|
1403 |
+
|
1404 |
+
Copyright (c) 2015-2017, Norbert Juffa
|
1405 |
+
All rights reserved.
|
1406 |
+
|
1407 |
+
Redistribution and use in source and binary forms, with or without
|
1408 |
+
modification, are permitted provided that the following conditions
|
1409 |
+
are met:
|
1410 |
+
|
1411 |
+
1. Redistributions of source code must retain the above copyright
|
1412 |
+
notice, this list of conditions and the following disclaimer.
|
1413 |
+
|
1414 |
+
2. Redistributions in binary form must reproduce the above copyright
|
1415 |
+
notice, this list of conditions and the following disclaimer in the
|
1416 |
+
documentation and/or other materials provided with the distribution.
|
1417 |
+
|
1418 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
1419 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
1420 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
1421 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
1422 |
+
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
1423 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
1424 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
1425 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
1426 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
1427 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
1428 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
1429 |
+
|
1430 |
+
15. Licensee's use of the lz4 third party component is
|
1431 |
+
subject to the following terms and conditions:
|
1432 |
+
|
1433 |
+
Copyright (C) 2011-2013, Yann Collet.
|
1434 |
+
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
|
1435 |
+
|
1436 |
+
Redistribution and use in source and binary forms, with or without
|
1437 |
+
modification, are permitted provided that the following conditions are
|
1438 |
+
met:
|
1439 |
+
|
1440 |
+
* Redistributions of source code must retain the above copyright
|
1441 |
+
notice, this list of conditions and the following disclaimer.
|
1442 |
+
* Redistributions in binary form must reproduce the above
|
1443 |
+
copyright notice, this list of conditions and the following disclaimer
|
1444 |
+
in the documentation and/or other materials provided with the
|
1445 |
+
distribution.
|
1446 |
+
|
1447 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
1448 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
1449 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
1450 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
1451 |
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
1452 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
1453 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
1454 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
1455 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
1456 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
1457 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
1458 |
+
|
1459 |
+
16. The NPP library uses code from the Boost Math Toolkit,
|
1460 |
+
and is subject to the following license:
|
1461 |
+
|
1462 |
+
Boost Software License - Version 1.0 - August 17th, 2003
|
1463 |
+
. . . .
|
1464 |
+
|
1465 |
+
Permission is hereby granted, free of charge, to any person or
|
1466 |
+
organization obtaining a copy of the software and accompanying
|
1467 |
+
documentation covered by this license (the "Software") to use,
|
1468 |
+
reproduce, display, distribute, execute, and transmit the Software,
|
1469 |
+
and to prepare derivative works of the Software, and to permit
|
1470 |
+
third-parties to whom the Software is furnished to do so, all
|
1471 |
+
subject to the following:
|
1472 |
+
|
1473 |
+
The copyright notices in the Software and this entire statement,
|
1474 |
+
including the above license grant, this restriction and the following
|
1475 |
+
disclaimer, must be included in all copies of the Software, in whole
|
1476 |
+
or in part, and all derivative works of the Software, unless such
|
1477 |
+
copies or derivative works are solely in the form of machine-executable
|
1478 |
+
object code generated by a source language processor.
|
1479 |
+
|
1480 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
1481 |
+
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
1482 |
+
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
|
1483 |
+
NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
|
1484 |
+
ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
|
1485 |
+
OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
|
1486 |
+
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
1487 |
+
OTHER DEALINGS IN THE SOFTWARE.
|
1488 |
+
|
1489 |
+
17. Portions of the Nsight Eclipse Edition is subject to the
|
1490 |
+
following license:
|
1491 |
+
|
1492 |
+
The Eclipse Foundation makes available all content in this plug-in
|
1493 |
+
("Content"). Unless otherwise indicated below, the Content is provided
|
1494 |
+
to you under the terms and conditions of the Eclipse Public License
|
1495 |
+
Version 1.0 ("EPL"). A copy of the EPL is available at http://
|
1496 |
+
www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program"
|
1497 |
+
will mean the Content.
|
1498 |
+
|
1499 |
+
If you did not receive this Content directly from the Eclipse
|
1500 |
+
Foundation, the Content is being redistributed by another party
|
1501 |
+
("Redistributor") and different terms and conditions may apply to your
|
1502 |
+
use of any object code in the Content. Check the Redistributor's
|
1503 |
+
license that was provided with the Content. If no such license exists,
|
1504 |
+
contact the Redistributor. Unless otherwise indicated below, the terms
|
1505 |
+
and conditions of the EPL still apply to any source code in the
|
1506 |
+
Content and such source code may be obtained at http://www.eclipse.org.
|
1507 |
+
|
1508 |
+
18. Some of the cuBLAS library routines uses code from
|
1509 |
+
OpenAI, which is subject to the following license:
|
1510 |
+
|
1511 |
+
License URL
|
1512 |
+
https://github.com/openai/openai-gemm/blob/master/LICENSE
|
1513 |
+
|
1514 |
+
License Text
|
1515 |
+
The MIT License
|
1516 |
+
|
1517 |
+
Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc.
|
1518 |
+
|
1519 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
1520 |
+
of this software and associated documentation files (the "Software"), to deal
|
1521 |
+
in the Software without restriction, including without limitation the rights
|
1522 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
1523 |
+
copies of the Software, and to permit persons to whom the Software is
|
1524 |
+
furnished to do so, subject to the following conditions:
|
1525 |
+
|
1526 |
+
The above copyright notice and this permission notice shall be included in
|
1527 |
+
all copies or substantial portions of the Software.
|
1528 |
+
|
1529 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
1530 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
1531 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
1532 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
1533 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
1534 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
1535 |
+
THE SOFTWARE.
|
1536 |
+
|
1537 |
+
19. Licensee's use of the Visual Studio Setup Configuration
|
1538 |
+
Samples is subject to the following license:
|
1539 |
+
|
1540 |
+
The MIT License (MIT)
|
1541 |
+
Copyright (C) Microsoft Corporation. All rights reserved.
|
1542 |
+
|
1543 |
+
Permission is hereby granted, free of charge, to any person
|
1544 |
+
obtaining a copy of this software and associated documentation
|
1545 |
+
files (the "Software"), to deal in the Software without restriction,
|
1546 |
+
including without limitation the rights to use, copy, modify, merge,
|
1547 |
+
publish, distribute, sublicense, and/or sell copies of the Software,
|
1548 |
+
and to permit persons to whom the Software is furnished to do so,
|
1549 |
+
subject to the following conditions:
|
1550 |
+
|
1551 |
+
The above copyright notice and this permission notice shall be included
|
1552 |
+
in all copies or substantial portions of the Software.
|
1553 |
+
|
1554 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
1555 |
+
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
1556 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
1557 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
1558 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
1559 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
1560 |
+
|
1561 |
+
20. Licensee's use of linmath.h header for CPU functions for
|
1562 |
+
GL vector/matrix operations from lunarG is subject to the
|
1563 |
+
Apache License Version 2.0.
|
1564 |
+
|
1565 |
+
21. The DX12-CUDA sample uses the d3dx12.h header, which is
|
1566 |
+
subject to the MIT license .
|
1567 |
+
|
1568 |
+
-----------------
|
llmeval-env/lib/python3.10/site-packages/torch/_refs/fft.py
ADDED
@@ -0,0 +1,590 @@
import math

from typing import Iterable, List, Literal, NamedTuple, Optional, Sequence, Tuple, Union

import torch
import torch._prims as prims
import torch._prims_common as utils
from torch._decomp import register_decomposition
from torch._prims_common import DimsType, ShapeType, TensorLikeType
from torch._prims_common.wrappers import _maybe_convert_to_dtype, out_wrapper

__all__ = [
    # Transforms
    "fft",
    "fft2",
    "fftn",
    "hfft",
    "hfft2",
    "hfftn",
    "rfft",
    "rfft2",
    "rfftn",
    "ifft",
    "ifft2",
    "ifftn",
    "ihfft",
    "ihfft2",
    "ihfftn",
    "irfft",
    "irfft2",
    "irfftn",
    # Helpers
    "fftshift",
    "ifftshift",
]

NormType = Union[None, Literal["forward", "backward", "ortho"]]
_NORM_VALUES = {None, "forward", "backward", "ortho"}
aten = torch._ops.ops.aten


def _apply_norm(
    x: TensorLikeType, norm: NormType, signal_numel: int, forward: bool
) -> TensorLikeType:
    """Apply normalization to the un-normalized FFT result"""
    torch._check(norm in _NORM_VALUES, lambda: f"Invalid normalization mode: {norm}")

    if norm == "ortho":
        return x * (1 / math.sqrt(signal_numel))

    normalize = (not forward and (norm is None or norm == "backward")) or (
        forward and norm == "forward"
    )
    return x * (1 / signal_numel) if normalize else x


def _promote_type_fft(
    dtype: torch.dtype, require_complex: bool, device: torch.device
) -> torch.dtype:
    """Helper to promote a dtype to one supported by the FFT primitives"""
    if dtype.is_complex:
        return dtype

    # Promote integral to default float type
    if not dtype.is_floating_point:
        dtype = torch.get_default_dtype()

    allowed_types = [torch.float32, torch.float64]
    maybe_support_half = device.type in ["cuda", "meta"]

    if maybe_support_half:
        allowed_types.append(torch.float16)
    torch._check(dtype in allowed_types, lambda: f"Unsupported dtype {dtype}")

    if require_complex:
        dtype = utils.corresponding_complex_dtype(dtype)

    return dtype


def _maybe_promote_tensor_fft(
    t: TensorLikeType, require_complex: bool = False
) -> TensorLikeType:
    """Helper to promote a tensor to a dtype supported by the FFT primitives"""
    cur_type = t.dtype
    new_type = _promote_type_fft(cur_type, require_complex, t.device)
    return _maybe_convert_to_dtype(t, new_type)  # type: ignore[return-value]


def _resize_fft_input(
    x: TensorLikeType, dims: Tuple[int, ...], sizes: Tuple[int, ...]
) -> TensorLikeType:
    """
    Fixes the shape of x such that x.size(dims[i]) == sizes[i],
    either by zero-padding, or by slicing x starting from 0.
    """
    assert len(dims) == len(sizes)
    must_copy = False
    x_sizes = x.shape
    pad_amount = [0] * len(x_sizes) * 2
    for i in range(len(dims)):
        if sizes[i] == -1:
            continue

        if x_sizes[dims[i]] < sizes[i]:
            must_copy = True
            pad_idx = len(pad_amount) - 2 * dims[i] - 1
            pad_amount[pad_idx] = sizes[i] - x_sizes[dims[i]]

        if x_sizes[dims[i]] > sizes[i]:
            x = x.narrow(dims[i], 0, sizes[i])

    return torch.constant_pad_nd(x, pad_amount) if must_copy else x


def _fft_c2r(
    func_name: str,
    input: TensorLikeType,
    n: Optional[int],
    dim: int,
    norm: NormType,
    forward: bool,
) -> TensorLikeType:
    """Common code for performing any complex to real FFT (irfft or hfft)"""
    input = _maybe_promote_tensor_fft(input, require_complex=True)
    dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),)
    last_dim_size = n if n is not None else 2 * (input.shape[dim] - 1)
    torch._check(
        last_dim_size >= 1,
        lambda: f"Invalid number of data points ({last_dim_size}) specified",
    )

    if n is not None:
        input = _resize_fft_input(input, dims=dims, sizes=(last_dim_size // 2 + 1,))

    if forward:
        input = torch.conj(input)

    output = prims.fft_c2r(input, dim=dims, last_dim_size=last_dim_size)
    return _apply_norm(output, norm=norm, signal_numel=last_dim_size, forward=forward)


def _fft_r2c(
    func_name: str,
    input: TensorLikeType,
    n: Optional[int],
    dim: int,
    norm: NormType,
    forward: bool,
    onesided: bool,
) -> TensorLikeType:
    """Common code for performing any real to complex FFT (rfft or ihfft)"""
    torch._check(
        not input.dtype.is_complex,
        lambda: f"{func_name} expects a floating point input tensor, but got {input.dtype}",
    )
    input = _maybe_promote_tensor_fft(input)
    dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),)
    dim_size = n if n is not None else input.shape[dim]
    torch._check(
        dim_size >= 1, lambda: f"Invalid number of data points ({dim_size}) specified"
    )

    if n is not None:
        input = _resize_fft_input(input, dims, (n,))

    ret = prims.fft_r2c(input, dim=dims, onesided=onesided)
    ret = _apply_norm(ret, norm, dim_size, forward)
    return ret if forward else torch.conj(ret)


def _fft_c2c(
    func_name: str,
    input: TensorLikeType,
    n: Optional[int],
    dim: int,
    norm: NormType,
    forward: bool,
) -> TensorLikeType:
    """Common code for performing any complex to complex FFT (fft or ifft)"""
    torch._check(
        input.dtype.is_complex,
        lambda: f"{func_name} expects a complex input tensor, but got {input.dtype}",
    )
    dims = (utils.canonicalize_dim(input.ndim, dim, wrap_scalar=False),)
    dim_size = n if n is not None else input.shape[dim]
    torch._check(
        dim_size >= 1, lambda: f"Invalid number of data points ({dim_size}) specified"
    )

    if n is not None:
        input = _resize_fft_input(input, dims, (n,))

    ret = prims.fft_c2c(input, dim=dims, forward=forward)
    return _apply_norm(ret, norm, dim_size, forward)


@register_decomposition(aten.fft_fft)
@out_wrapper()
def fft(
    input: TensorLikeType,
    n: Optional[int] = None,
    dim: int = -1,
    norm: NormType = None,
) -> TensorLikeType:
    if input.dtype.is_complex:
        return _fft_c2c("fft", input, n, dim, norm, forward=True)
    else:
        return _fft_r2c("fft", input, n, dim, norm, forward=True, onesided=False)


@register_decomposition(aten.fft_ifft)
@out_wrapper()
def ifft(
    input: TensorLikeType,
    n: Optional[int] = None,
    dim: int = -1,
    norm: NormType = None,
) -> TensorLikeType:
    if input.dtype.is_complex:
        return _fft_c2c("ifft", input, n, dim, norm, forward=False)
    else:
        return _fft_r2c("ifft", input, n, dim, norm, forward=False, onesided=False)


@register_decomposition(aten.fft_rfft)
@out_wrapper()
def rfft(
    input: TensorLikeType,
    n: Optional[int] = None,
    dim: int = -1,
    norm: NormType = None,
) -> TensorLikeType:
    return _fft_r2c("rfft", input, n, dim, norm, forward=True, onesided=True)


@register_decomposition(aten.fft_irfft)
@out_wrapper()
def irfft(
    input: TensorLikeType,
    n: Optional[int] = None,
    dim: int = -1,
    norm: NormType = None,
) -> TensorLikeType:
    return _fft_c2r("irfft", input, n, dim, norm, forward=False)


@register_decomposition(aten.fft_hfft)
@out_wrapper()
def hfft(
    input: TensorLikeType,
    n: Optional[int] = None,
    dim: int = -1,
    norm: NormType = None,
) -> TensorLikeType:
    return _fft_c2r("hfft", input, n, dim, norm, forward=True)


@register_decomposition(aten.fft_ihfft)
@out_wrapper()
def ihfft(
    input: TensorLikeType,
    n: Optional[int] = None,
    dim: int = -1,
    norm: NormType = None,
) -> TensorLikeType:
    return _fft_r2c("ihfft", input, n, dim, norm, forward=False, onesided=True)


class _ShapeAndDims(NamedTuple):
    shape: Tuple[int, ...]
    dims: Tuple[int, ...]


def _canonicalize_fft_shape_and_dim_args(
    input: TensorLikeType, shape: Optional[ShapeType], dim: Optional[DimsType]
) -> _ShapeAndDims:
    """Convert the shape and dim arguments into a canonical form where neither are optional"""
    input_dim = input.ndim
    input_sizes = input.shape

    if dim is not None:
        if not isinstance(dim, Sequence):
            dim = (dim,)
        ret_dims = utils.canonicalize_dims(input_dim, dim, wrap_scalar=False)

        # Check dims are unique
        torch._check(
            len(set(ret_dims)) == len(ret_dims), lambda: "FFT dims must be unique"
        )

    if shape is not None:
        if not isinstance(shape, Sequence):
            shape = (shape,)

        # Has shape, might have dim
        torch._check(
            dim is None or len(dim) == len(shape),
            lambda: "When given, dim and shape arguments must have the same length",
        )
        transform_ndim = len(shape)

        torch._check(
            transform_ndim <= input_dim,
            lambda: f"Got shape with {transform_ndim} values but input tensor "
            f"only has {input_dim} dimensions.",
        )

        # If shape is given, dims defaults to the last len(shape) dimensions
        if dim is None:
            ret_dims = tuple(range(input_dim - transform_ndim, input_dim))

        # Translate any -1 values in shape to the default length
        ret_shape = tuple(
            s if s != -1 else input_sizes[d] for (s, d) in zip(shape, ret_dims)  # type: ignore[possibly-undefined]
        )
    elif dim is None:
        # No shape, no dim
        ret_dims = tuple(range(input_dim))
        ret_shape = tuple(input_sizes)
    else:
        # No shape, has dim
        ret_shape = tuple(input_sizes[d] for d in ret_dims)  # type: ignore[possibly-undefined]

    for n in ret_shape:
        torch._check(n > 0, lambda: f"Invalid number of data points ({n}) specified")

    return _ShapeAndDims(shape=ret_shape, dims=ret_dims)  # type: ignore[possibly-undefined]


def _prod(xs: Iterable[int]) -> int:
    """Compute product of a list"""
    prod = 1
    for x in xs:
        prod *= x
    return prod


def _fftn_c2c(
    function_name: str,
    input: TensorLikeType,
    shape: Tuple[int, ...],
    dim: Tuple[int, ...],
    norm: NormType,
    forward: bool,
) -> TensorLikeType:
    """Common code for n-dimensional complex to complex FFTs (fftn or ifftn)"""
    torch._check(
        input.dtype.is_complex,
        lambda: f"{function_name} expects a complex input tensor, "
        f"but got {input.dtype}",
    )
    x = _resize_fft_input(input, dim, shape)
    output = prims.fft_c2c(x, dim=dim, forward=forward)
    return _apply_norm(output, norm=norm, signal_numel=_prod(shape), forward=forward)


@register_decomposition(aten.fft_fftn)
@out_wrapper()
def fftn(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = None,
    norm: NormType = None,
) -> TensorLikeType:
    (shape, dim) = _canonicalize_fft_shape_and_dim_args(input, s, dim)
    x = _maybe_promote_tensor_fft(input, require_complex=True)
    return _fftn_c2c("fftn", x, shape, dim, norm, forward=True)


@register_decomposition(aten.fft_ifftn)
@out_wrapper()
def ifftn(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = None,
    norm: NormType = None,
) -> TensorLikeType:
    (shape, dim) = _canonicalize_fft_shape_and_dim_args(input, s, dim)
    x = _maybe_promote_tensor_fft(input, require_complex=True)
    return _fftn_c2c("ifftn", x, shape, dim, norm, forward=False)


@register_decomposition(aten.fft_rfftn)
@out_wrapper()
def rfftn(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = None,
    norm: NormType = None,
) -> TensorLikeType:
    torch._check(
        not input.dtype.is_complex,
        lambda: f"rfftn expects a real-valued input tensor, but got {input.dtype}",
    )
    shape, dim = _canonicalize_fft_shape_and_dim_args(input, s, dim)
    input = _maybe_promote_tensor_fft(input, require_complex=False)
    input = _resize_fft_input(input, dim, shape)
    out = prims.fft_r2c(input, dim=dim, onesided=True)
    return _apply_norm(out, norm=norm, signal_numel=_prod(shape), forward=True)


@register_decomposition(aten.fft_ihfftn)
@out_wrapper()
def ihfftn(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = None,
    norm: NormType = None,
) -> TensorLikeType:
    torch._check(
        not input.dtype.is_complex,
        lambda: f"ihfftn expects a real-valued input tensor, but got {input.dtype}",
    )
    shape, dim = _canonicalize_fft_shape_and_dim_args(input, s, dim)
    torch._check(len(shape) > 0, lambda: "ihfftn must transform at least one axis")
    input = _maybe_promote_tensor_fft(input, require_complex=False)
    input = _resize_fft_input(input, dim, shape)

    tmp = prims.fft_r2c(input, dim=dim[-1:], onesided=True)

    if len(dim) == 1:
        tmp = _apply_norm(tmp, norm=norm, signal_numel=shape[0], forward=False)
        return prims.conj(tmp)

    tmp = prims.conj_physical(tmp)
    tmp = prims.fft_c2c(tmp, dim=dim[:-1], forward=False)
    return _apply_norm(tmp, norm=norm, signal_numel=_prod(shape), forward=False)


class _CanonicalizeC2rReturn(NamedTuple):
    shape: Tuple[int, ...]
    dim: Tuple[int, ...]
    last_dim_size: int


def _canonicalize_fft_c2r_shape_and_dim_args(
    fname: str,
    input: TensorLikeType,
    s: Optional[ShapeType],
    dim: Optional[DimsType],
) -> _CanonicalizeC2rReturn:
    """Canonicalize shape and dim arguments for n-dimensional c2r transforms,
    as well as calculating the last_dim_size which is shape[dim[-1]] for the output"""
    (shape, dim) = _canonicalize_fft_shape_and_dim_args(input, s, dim)
    torch._check(len(shape) > 0, lambda: f"{fname} must transform at least one axis")

    if s is None or s[-1] == -1:
        last_dim_size = 2 * (input.shape[dim[-1]] - 1)
    else:
        last_dim_size = shape[-1]

    torch._check(
        last_dim_size >= 1,
        lambda: f"Invalid number of data points ({last_dim_size}) specified",
    )

    shape_list = list(shape)
    shape_list[-1] = last_dim_size // 2 + 1
    return _CanonicalizeC2rReturn(
        shape=tuple(shape_list), dim=dim, last_dim_size=last_dim_size
    )


@register_decomposition(aten.fft_irfftn)
@out_wrapper()
def irfftn(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = None,
    norm: NormType = None,
) -> TensorLikeType:
    shape, dim, last_dim_size = _canonicalize_fft_c2r_shape_and_dim_args(
        "irfftn", input, s, dim
    )
    input = _maybe_promote_tensor_fft(input, require_complex=True)
    input = _resize_fft_input(input, dim, shape)
    out = prims.fft_c2r(input, dim=dim, last_dim_size=last_dim_size)
    return _apply_norm(out, norm, _prod(out.shape[d] for d in dim), forward=False)


@register_decomposition(aten.fft_hfftn)
@out_wrapper()
def hfftn(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = None,
    norm: NormType = None,
) -> TensorLikeType:
    shape, dim, last_dim_size = _canonicalize_fft_c2r_shape_and_dim_args(
        "hfftn", input, s, dim
    )
    input = _maybe_promote_tensor_fft(input, require_complex=True)
    input = _resize_fft_input(input, dim, shape)

    tmp = prims.fft_c2c(input, dim=dim[:-1], forward=True) if len(dim) > 1 else input
    tmp = _apply_norm(tmp, norm, _prod(shape[:-1]), forward=True)
    tmp = prims.conj_physical(tmp)
    out = prims.fft_c2r(tmp, dim=dim[-1:], last_dim_size=last_dim_size)
    return _apply_norm(out, norm, last_dim_size, forward=True)


@register_decomposition(aten.fft_fft2)
@out_wrapper()
def fft2(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = (-2, -1),
    norm: NormType = None,
) -> TensorLikeType:
    return torch.fft.fftn(input, s=s, dim=dim, norm=norm)


@register_decomposition(aten.fft_ifft2)
@out_wrapper()
def ifft2(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = (-2, -1),
    norm: NormType = None,
) -> TensorLikeType:
    return torch.fft.ifftn(input, s=s, dim=dim, norm=norm)


@register_decomposition(aten.fft_rfft2)
@out_wrapper()
def rfft2(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = (-2, -1),
    norm: NormType = None,
) -> TensorLikeType:
    return torch.fft.rfftn(input, s=s, dim=dim, norm=norm)


@register_decomposition(aten.fft_irfft2)
@out_wrapper()
def irfft2(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = (-2, -1),
    norm: NormType = None,
) -> TensorLikeType:
    return torch.fft.irfftn(input, s=s, dim=dim, norm=norm)


@register_decomposition(aten.fft_hfft2)
@out_wrapper()
def hfft2(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = (-2, -1),
    norm: NormType = None,
) -> TensorLikeType:
    return torch.fft.hfftn(input, s=s, dim=dim, norm=norm)


@register_decomposition(aten.fft_ihfft2)
@out_wrapper()
def ihfft2(
    input: TensorLikeType,
    s: Optional[ShapeType] = None,
    dim: Optional[DimsType] = (-2, -1),
    norm: NormType = None,
) -> TensorLikeType:
    return torch.fft.ihfftn(input, s=s, dim=dim, norm=norm)


def _default_alldims(dim: Optional[DimsType], x: TensorLikeType) -> List[int]:
    """Convert Optional[DimsType] to a simple list, defaulting to all dimensions"""
    if dim is None:
        return list(range(x.ndim))
    elif not isinstance(dim, Sequence):
        return [dim]
    else:
        return list(dim)


@register_decomposition(aten.fft_fftshift)
def fftshift(input: TensorLikeType, dim: Optional[DimsType] = None) -> TensorLikeType:
    dims = _default_alldims(dim, input)
    shift = [input.shape[d] // 2 for d in dims]
    return torch.roll(input, shift, dims)


@register_decomposition(aten.fft_ifftshift)
def ifftshift(input: TensorLikeType, dim: Optional[DimsType] = None) -> TensorLikeType:
    dims = _default_alldims(dim, input)
    shift = [(input.shape[d] + 1) // 2 for d in dims]
    return torch.roll(input, shift, dims)
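A quick, hedged sanity check of the normalization convention that _apply_norm implements in the file above: norm=None/"backward" leaves the forward transform unscaled and divides the inverse by n, "forward" does the opposite, and "ortho" scales both directions by 1/sqrt(n) so that a single transform is unitary. The following minimal sketch uses the public torch.fft API and is an illustration added here, not part of the uploaded file:

import torch

x = torch.randn(8, dtype=torch.complex64)

# Default ("backward") norm: fft is unscaled and ifft divides by n,
# so the round trip reproduces the input.
assert torch.allclose(torch.fft.ifft(torch.fft.fft(x)), x, atol=1e-5)

# "ortho" scales both directions by 1/sqrt(n); one transform is then
# unitary and preserves the signal's L2 norm (Parseval's theorem).
X = torch.fft.fft(x, norm="ortho")
assert torch.allclose(
    torch.linalg.vector_norm(X), torch.linalg.vector_norm(x), atol=1e-4
)
assert torch.allclose(torch.fft.ifft(X, norm="ortho"), x, atol=1e-5)

# fftshift rolls the zero-frequency bin to the center; ifftshift is its
# exact inverse (the two differ for odd-length signals, hence the helpers).
assert torch.equal(torch.fft.ifftshift(torch.fft.fftshift(x)), x)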
llmeval-env/lib/python3.10/site-packages/torch/_refs/nn/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (278 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/_refs/special/__init__.py
ADDED
@@ -0,0 +1,236 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import math
|
2 |
+
from typing import Optional, Union
|
3 |
+
|
4 |
+
import torch
|
5 |
+
import torch._prims as prims
|
6 |
+
import torch._prims_common as utils
|
7 |
+
import torch._refs as refs
|
8 |
+
|
9 |
+
from torch import Tensor
|
10 |
+
from torch._decomp import register_decomposition
|
11 |
+
from torch._prims_common import (
|
12 |
+
ELEMENTWISE_TYPE_PROMOTION_KIND,
|
13 |
+
Number,
|
14 |
+
NumberType,
|
15 |
+
TensorLike,
|
16 |
+
TensorLikeType,
|
17 |
+
)
|
18 |
+
from torch._prims_common.wrappers import elementwise_type_promotion_wrapper, out_wrapper
|
19 |
+
from torch._refs import (
|
20 |
+
_make_alias,
|
21 |
+
_make_elementwise_binary_reference,
|
22 |
+
_make_elementwise_unary_reference,
|
23 |
+
)
|
24 |
+
|
25 |
+
|
26 |
+
__all__ = [
|
27 |
+
"bessel_j0",
|
28 |
+
"bessel_j1",
|
29 |
+
"entr",
|
30 |
+
"erfcx",
|
31 |
+
"expit",
|
32 |
+
"i0e",
|
33 |
+
"i1",
|
34 |
+
"i1e",
|
35 |
+
"log_ndtr",
|
36 |
+
"logit",
|
37 |
+
"log_softmax",
|
38 |
+
"multigammaln",
|
39 |
+
"ndtr",
|
40 |
+
"ndtri",
|
41 |
+
"softmax",
|
42 |
+
"spherical_bessel_j0",
|
43 |
+
"xlog1py",
|
44 |
+
"zeta",
|
45 |
+
]
|
46 |
+
aten = torch._ops.ops.aten
|
47 |
+
|
48 |
+
|
49 |
+
@_make_elementwise_unary_reference(
|
50 |
+
ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
51 |
+
)
|
52 |
+
def bessel_j0(a: TensorLikeType) -> TensorLikeType:
|
53 |
+
return prims.bessel_j0(a)
|
54 |
+
|
55 |
+
|
56 |
+
@_make_elementwise_unary_reference(
|
57 |
+
ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
58 |
+
)
|
59 |
+
def bessel_j1(a: TensorLikeType) -> TensorLikeType:
|
60 |
+
return prims.bessel_j1(a)
|
61 |
+
|
62 |
+
|
63 |
+
@register_decomposition(aten.special_entr)
|
64 |
+
@out_wrapper()
|
65 |
+
@elementwise_type_promotion_wrapper(
|
66 |
+
type_promoting_args=("a",),
|
67 |
+
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
68 |
+
)
|
69 |
+
def entr(a: TensorLikeType) -> TensorLikeType:
|
70 |
+
return torch.where(
|
71 |
+
torch.isnan(a),
|
72 |
+
a,
|
73 |
+
torch.where(a > 0, -a * torch.log(a), torch.where(a == 0, 0, -torch.inf)),
|
74 |
+
)
|
75 |
+
|
76 |
+
|
77 |
+
@register_decomposition(aten.special_erfcx)
|
78 |
+
@out_wrapper()
|
79 |
+
@elementwise_type_promotion_wrapper(
|
80 |
+
type_promoting_args=("a",),
|
81 |
+
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
82 |
+
)
|
83 |
+
def erfcx(a: TensorLikeType) -> TensorLikeType:
|
84 |
+
return prims.erfcx(a)
|
85 |
+
|
86 |
+
|
87 |
+
# alias for sigmoid
|
88 |
+
expit = _make_alias(torch.sigmoid, "expit")
|
89 |
+
|
90 |
+
|
91 |
+
@_make_elementwise_unary_reference(
|
92 |
+
ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
93 |
+
)
|
94 |
+
def i0e(a: TensorLikeType) -> TensorLikeType:
|
95 |
+
return prims.bessel_i0e(a)
|
96 |
+
|
97 |
+
|
98 |
+
@_make_elementwise_unary_reference(
|
99 |
+
ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
100 |
+
)
|
101 |
+
def i1(a: TensorLikeType) -> TensorLikeType:
|
102 |
+
return prims.bessel_i1(a)
|
103 |
+
|
104 |
+
|
105 |
+
@_make_elementwise_unary_reference(
|
106 |
+
ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
107 |
+
)
|
108 |
+
def i1e(a: TensorLikeType) -> TensorLikeType:
|
109 |
+
return prims.bessel_i1e(a)
|
110 |
+
|
111 |
+
|
112 |
+
@register_decomposition(aten.special_log_ndtr)
|
113 |
+
@out_wrapper()
|
114 |
+
@elementwise_type_promotion_wrapper(
|
115 |
+
type_promoting_args=("a",),
|
116 |
+
type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
117 |
+
)
|
118 |
+
def log_ndtr(a: TensorLikeType) -> TensorLikeType:
|
119 |
+
# Note: M_SQRT1_2 is the value of 1 / √2
|
120 |
+
M_SQRT1_2 = 0.707106781186547524400844362104849039
|
121 |
+
t = a * M_SQRT1_2
|
122 |
+
return torch.where(
|
123 |
+
a < 1.0,
|
124 |
+
torch.log(torch.special.erfcx(-t) / 2) - t * t,
|
125 |
+
torch.log1p(-torch.erfc(t) / 2),
|
126 |
+
)
|
@register_decomposition(aten.logit)
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("self",),
    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def logit(self: TensorLikeType, eps: Optional[float] = None) -> TensorLikeType:
    if eps is None:
        # eps=None means no meaningful clamping: the resulting [-1, 2] bounds
        # leave the [0, 1] domain of logit untouched.
        eps = -1.0
    lo = eps
    hi = 1 - eps
    self = torch.clamp(self, lo, hi)
    return torch.log(torch.true_divide(self, torch.sub(1, self)))
|
145 |
+
@out_wrapper()
|
146 |
+
@elementwise_type_promotion_wrapper(
|
147 |
+
type_promoting_args=("a", "b"),
|
148 |
+
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
|
149 |
+
)
|
150 |
+
def xlog1py(a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType]):
|
151 |
+
torch._check(
|
152 |
+
isinstance(a, TensorLike) or isinstance(b, TensorLike),
|
153 |
+
lambda: 'Expected either argument a or b to be a Tensor"',
|
154 |
+
)
|
155 |
+
|
156 |
+
# Operations like eq and log do not handle scalar values, so we convert them to scalar_tensors.
|
157 |
+
if isinstance(a, TensorLike) and isinstance(b, Number):
|
158 |
+
b = refs.scalar_tensor(b, dtype=a.dtype, device=a.device)
|
159 |
+
elif isinstance(b, TensorLike) and isinstance(a, Number):
|
160 |
+
a = refs.scalar_tensor(a, dtype=b.dtype, device=b.device)
|
161 |
+
|
162 |
+
# mypy: expected "Tensor"
|
163 |
+
assert isinstance(a, TensorLike)
|
164 |
+
assert isinstance(b, TensorLike)
|
165 |
+
rhs = torch.where(torch.eq(a, 0), 0, torch.mul(a, torch.log1p(b)))
|
166 |
+
return torch.where(torch.isnan(b), float("nan"), rhs)
|
167 |
+
|
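
# A minimal sketch (not part of this file) of the special cases encoded above:
# a == 0 yields 0 (even where log1p(b) is -inf), but NaN in b always propagates.
#
#   import torch
#   a = torch.tensor([0.0, 2.0, 0.0])
#   b = torch.tensor([-1.0, 1.0, float("nan")])
#   torch.special.xlog1py(a, b)  # tensor([0.0000, 1.3863,    nan])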
@register_decomposition(aten.mvlgamma)
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def multigammaln(a: TensorLikeType, p: int) -> TensorLikeType:
    c = 0.25 * p * (p - 1) * math.log(math.pi)
    b = 0.5 * torch.arange(start=(1 - p), end=1, step=1, dtype=a.dtype, device=a.device)
    return torch.sum(torch.lgamma(a.unsqueeze(-1) + b), dim=-1) + c
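
# A minimal sketch (not part of this file): for p = 1 the decomposition above
# reduces to lgamma, since c = 0 and b = [0.0].
#
#   import torch
#   a = torch.tensor([3.0, 5.5])
#   torch.testing.assert_close(torch.special.multigammaln(a, 1), torch.lgamma(a))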
@register_decomposition(aten.special_ndtr)
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def ndtr(a: TensorLikeType) -> TensorLikeType:
    # Note: M_SQRT1_2 is the value of 1 / √2
    M_SQRT1_2 = 0.707106781186547524400844362104849039
    a_sqrt_2 = a * M_SQRT1_2
    return (1 + torch.erf(a_sqrt_2)) * 0.5


@register_decomposition(aten.special_ndtri)
@out_wrapper()
@elementwise_type_promotion_wrapper(
    type_promoting_args=("a",),
    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def ndtri(a: TensorLikeType) -> TensorLikeType:
    return prims.ndtri(a)
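
# A minimal sketch (not part of this file): ndtr is the standard normal CDF and
# ndtri is its inverse, so they round-trip up to float tolerance.
#
#   import torch
#   p = torch.tensor([0.1, 0.5, 0.9])
#   torch.testing.assert_close(torch.special.ndtr(torch.special.ndtri(p)), p)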
# Forwarding alias: the special variant doesn't support the out kwarg
# CompositeImplicitAutograd - don't register decomp
def log_softmax(
    a: TensorLikeType,
    dim: int,
    dtype: Optional[torch.dtype] = None,
) -> TensorLikeType:
    return torch.log_softmax(a=a, dim=dim, dtype=dtype)  # type: ignore[call-overload]


# Forwarding alias: the special variant doesn't support the out kwarg
# CompositeImplicitAutograd - don't register decomp
def softmax(
    a: TensorLikeType,
    dim: int,
    dtype: Optional[torch.dtype] = None,
) -> TensorLikeType:
    return torch.softmax(a=a, dim=dim, dtype=dtype)  # type: ignore[call-overload]


@_make_elementwise_unary_reference(
    ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def spherical_bessel_j0(a: TensorLikeType) -> TensorLikeType:
    return prims.spherical_bessel_j0(a)


# TODO: add docstring
@_make_elementwise_binary_reference(
    type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
)
def zeta(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType:
    return prims.zeta(a, b)
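
# A minimal sketch (not part of this file): prims.zeta computes the Hurwitz zeta
# function, which reduces to the Riemann zeta function at b = 1, e.g. zeta(2, 1) = pi^2 / 6.
#
#   import math
#   import torch
#   torch.testing.assert_close(
#       torch.special.zeta(torch.tensor(2.0), torch.tensor(1.0)),
#       torch.tensor(math.pi ** 2 / 6),
#   )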
llmeval-env/lib/python3.10/site-packages/torch/_refs/special/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (5.1 kB)

llmeval-env/lib/python3.10/site-packages/torch/amp/__pycache__/grad_scaler.cpython-310.pyc
ADDED
Binary file (23.6 kB)

llmeval-env/lib/python3.10/site-packages/torch/bin/torch_shm_manager
ADDED
Binary file (80.5 kB)
llmeval-env/lib/python3.10/site-packages/torch/compiler/__init__.py
ADDED
@@ -0,0 +1,193 @@
import torch
from typing import List

__all__ = [
    "compile",
    "assume_constant_result",
    "reset",
    "allow_in_graph",
    "list_backends",
    "disable",
    "cudagraph_mark_step_begin",
    "wrap_numpy",
    "is_compiling",
    "is_dynamo_compiling",
]
def compile(*args, **kwargs):
    """
    See :func:`torch.compile` for details on the arguments for this function.
    """
    return torch.compile(*args, **kwargs)


def reset() -> None:
    """
    This function clears all compilation caches and restores the system to its initial state.
    It is recommended to call this function, especially after using operations like `torch.compile(...)`,
    to ensure a clean state before another unrelated compilation.
    """
    import torch._dynamo

    torch._dynamo.reset()
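
# A minimal sketch (not part of this file) of the intended usage: compile once,
# then call reset() to drop caches before unrelated compilation work.
#
#   import torch
#
#   @torch.compile
#   def f(x):
#       return x.sin() + 1
#
#   f(torch.randn(4))        # first call triggers compilation
#   torch.compiler.reset()   # clear all compilation caches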
def allow_in_graph(fn):
    """
    Customize which functions compilation will include in the generated graph.
    It bypasses all introspection of the symbolic Python code in favor of
    directly writing it to the graph.
    If fn is a list or tuple of callables, it recursively applies :func:`allow_in_graph()`
    to each function and returns a new list or tuple containing the modified functions.

    Args:
        fn: A callable representing the function to be included in the graph.

    .. warning::

        :func:`allow_in_graph` skips TorchDynamo completely on the decorated function,
        skipping all TorchDynamo safety checks (graph breaks, handling closures, etc.).
        Therefore, one has to be very careful with :func:`allow_in_graph` since subsystems
        like AOT Autograd rely on TorchDynamo.
        If not careful, this could lead to soundness and really hard-to-debug issues.

    """
    import torch._dynamo

    return torch._dynamo.allow_in_graph(fn)
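
# A minimal sketch (not part of this file): mark a helper so Dynamo records a
# direct call to it instead of tracing its body.
#
#   import torch
#
#   @torch.compiler.allow_in_graph
#   def opaque_helper(x):
#       # Dynamo will not introspect this body; the call appears in the graph as-is.
#       return x * 2
#
#   @torch.compile
#   def f(x):
#       return opaque_helper(x) + 1
#
#   f(torch.randn(3))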
def list_backends(exclude_tags=("debug", "experimental")) -> List[str]:
    """
    Return valid strings that can be passed to `torch.compile(..., backend="name")`.

    Args:
        exclude_tags(optional): A tuple of strings representing tags to exclude.
    """
    import torch._dynamo

    return torch._dynamo.list_backends(exclude_tags)
def assume_constant_result(fn):
    """
    This function is used to mark a function `fn` as having a constant result.
    This allows the compiler to optimize away your function.
    Returns the same function `fn`.

    Args:
        fn: The function to be marked as having a constant result.

    .. warning::
        `assume_constant_result` can, if invalid, cause safety and soundness issues; :func:`torch.compile`
        will not attempt to validate whether the constant assumption is true or not.

    """
    import torch._dynamo

    return torch._dynamo.assume_constant_result(fn)
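
# A minimal sketch (not part of this file): a lookup whose result is treated as
# a compile-time constant. The compiler never revalidates it, so this is only
# sound if the returned value truly never changes.
#
#   import torch
#
#   @torch.compiler.assume_constant_result
#   def get_scale():
#       return 2.0
#
#   @torch.compile
#   def f(x):
#       return x * get_scale()
#
#   f(torch.randn(3))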
def disable(fn=None, recursive=True):
    """
    This function provides both a decorator and a context manager to disable compilation on a function.
    It also provides the option of recursively disabling called functions.

    Args:
        fn (optional): The function to disable.
        recursive (optional): A boolean value indicating whether the disabling should be recursive.
    """
    import torch._dynamo

    return torch._dynamo.disable(fn, recursive)
def cudagraph_mark_step_begin():
    """
    Indicates that a new iteration of inference or training is about to begin.

    CUDA Graphs will free tensors of a prior iteration. A new iteration is started on each invocation of
    torch.compile, so long as there is not a pending backward that has not been called.

    If that heuristic is wrong, such as in the following example, manually mark it with this API.

    .. code-block:: python

        @torch.compile(mode="reduce-overhead")
        def rand_foo():
            return torch.rand([4], device="cuda")

        for _ in range(5):
            torch.compiler.cudagraph_mark_step_begin()
            rand_foo() + rand_foo()

    For more details, see `torch.compiler_cudagraph_trees <https://pytorch.org/docs/main/torch.compiler_cudagraph_trees.html>`__
    """
    from torch._inductor import cudagraph_trees

    cudagraph_trees.mark_step_begin()
def wrap_numpy(fn):
    r"""Decorator that turns a function from ``np.ndarray``s to ``np.ndarray``s into a function
    from ``torch.Tensor``s to ``torch.Tensor``s.

    It is designed to be used with :func:`torch.compile` with ``fullgraph=True``. It allows you to
    compile a NumPy function as if it were a PyTorch function. This allows you to run NumPy code
    on CUDA or compute its gradients.

    .. note::

        This decorator does not work without :func:`torch.compile`.

    Example::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
        >>> # Compile a NumPy function as a Tensor -> Tensor function
        >>> @torch.compile(fullgraph=True)
        >>> @torch.compiler.wrap_numpy
        >>> def fn(a: np.ndarray):
        >>>     return np.sum(a * a)
        >>> # Execute the NumPy function using Tensors on CUDA and compute the gradients
        >>> x = torch.arange(6, dtype=torch.float32, device="cuda", requires_grad=True)
        >>> out = fn(x)
        >>> out.backward()
        >>> print(x.grad)
        tensor([ 0.,  2.,  4.,  6.,  8., 10.], device='cuda:0')
    """
    from torch._dynamo.external_utils import wrap_numpy as wrap

    return wrap(fn)
_is_compiling_flag: bool = False


def is_compiling() -> bool:
    """
    Indicates whether a graph is executed/traced as part of torch.compile() or torch.export().

    Note that there are two other related flags that should be deprecated eventually:
    * torch._dynamo.external_utils.is_compiling()
    * torch._utils.is_compiling()

    Example::

        >>> def forward(self, x):
        >>>     if not torch.compiler.is_compiling():
        >>>         ...logic that is not needed in a compiled/traced graph...
        >>>
        >>>     ...rest of the function...
    """
    if torch.jit.is_scripting():
        return False
    else:
        return _is_compiling_flag


def is_dynamo_compiling() -> bool:
    """
    Indicates whether a graph is traced via TorchDynamo.

    It's stricter than the is_compiling() flag, as it is only set to True when
    TorchDynamo is used.

    Example::

        >>> def forward(self, x):
        >>>     if not torch.compiler.is_dynamo_compiling():
        >>>         ...logic that is not needed in a TorchDynamo-traced graph...
        >>>
        >>>     ...rest of the function...
    """
    return False
llmeval-env/lib/python3.10/site-packages/torch/compiler/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (6.99 kB)

llmeval-env/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2a07646db6cdda85cca1d5170434c87be127895cdde4d45a3704578eab2b4f53
size 85435777
llmeval-env/lib/python3.10/site-packages/torch/nested/__init__.py
ADDED
@@ -0,0 +1,253 @@
from typing import List, Optional, Union, Sequence

import torch
from torch import SymInt, Tensor
from torch._C import _add_docstr, _nested  # type: ignore[attr-defined]

from torch.types import _device as Device, _dtype as DType

__all__ = [
    "to_padded_tensor",
    "as_nested_tensor",
    "nested_tensor",
    "narrow",
]
# Nested Tensor constructor functions


def as_nested_tensor(
    tensor_list: Sequence[Tensor],
    dtype: Optional[DType] = None,
    device: Optional[Device] = None,
    layout=None
) -> Tensor:
    r"""
    Constructs a nested tensor preserving autograd history from :attr:`tensor_list`, a list of tensors.

    .. note::
        Tensors within the list are always copied by this function due to current nested tensor semantics.

    Args:
        tensor_list (List[Tensor]): a list of tensors with the same ndim

    Keyword arguments:
        dtype (:class:`torch.dtype`, optional): the desired type of returned nested tensor.
            Default: if None, same :class:`torch.dtype` as leftmost tensor in the list.
        device (:class:`torch.device`, optional): the desired device of returned nested tensor.
            Default: if None, same :class:`torch.device` as leftmost tensor in the list.
        layout (:class:`torch.layout`, optional): the desired layout of returned nested tensor.
            Only strided and jagged layouts are supported. Default: if None, the strided layout.

    Example::

        >>> a = torch.arange(3, dtype=torch.float, requires_grad=True)
        >>> b = torch.arange(5, dtype=torch.float, requires_grad=True)
        >>> nt = torch.nested.as_nested_tensor([a, b])
        >>> nt.is_leaf
        False
        >>> fake_grad = torch.nested.nested_tensor([torch.ones_like(a), torch.zeros_like(b)])
        >>> nt.backward(fake_grad)
        >>> a.grad
        tensor([1., 1., 1.])
        >>> b.grad
        tensor([0., 0., 0., 0., 0.])
    """
    if not isinstance(tensor_list, list) or any(
        not isinstance(t, Tensor) for t in tensor_list
    ):
        raise TypeError(
            "as_nested_tensor(): Expected first argument to be a list of tensors "
        )

    if layout is None:
        layout = torch.strided
    if layout == torch.strided:
        return torch._nested_tensor_from_tensor_list(tensor_list, dtype, None, device, None)
    elif layout == torch.jagged:
        from torch.nested._internal.nested_tensor import jagged_from_list

        nt, _ = jagged_from_list(tensor_list, offsets=None, device=device, dtype=dtype)
        return nt
    else:
        raise RuntimeError(f"Specified layout is unsupported for nested tensors: {layout}")
# Note: This not only adds doc strings for the nested ops, but
# also connects the torch.nested Python namespace to the torch._C._nested builtins.

to_padded_tensor = _add_docstr(
    _nested.nested_to_padded_tensor,
    r"""
to_padded_tensor(input, padding, output_size=None, out=None) -> Tensor

Returns a new (non-nested) Tensor by padding the :attr:`input` nested tensor.
The leading entries will be filled with the nested data,
while the trailing entries will be padded.

.. warning::

    :func:`to_padded_tensor` always copies the underlying data,
    since the nested and the non-nested tensors differ in memory layout.

Args:
    padding (float): The padding value for the trailing entries.

Keyword args:
    output_size (Tuple[int]): The size of the output tensor.
        If given, it must be large enough to contain all nested data;
        else, it will be inferred by taking the max size of each nested sub-tensor along each dimension.
    out (Tensor, optional): the output tensor.

Example::

    >>> nt = torch.nested.nested_tensor([torch.randn((2, 5)), torch.randn((3, 4))])
    nested_tensor([
      tensor([[ 1.6862, -1.1282,  1.1031,  0.0464, -1.3276],
              [-1.9967, -1.0054,  1.8972,  0.9174, -1.4995]]),
      tensor([[-1.8546, -0.7194, -0.2918, -0.1846],
              [ 0.2773,  0.8793, -0.5183, -0.6447],
              [ 1.8009,  1.8468, -0.9832, -1.5272]])
    ])
    >>> pt_infer = torch.nested.to_padded_tensor(nt, 0.0)
    tensor([[[ 1.6862, -1.1282,  1.1031,  0.0464, -1.3276],
             [-1.9967, -1.0054,  1.8972,  0.9174, -1.4995],
             [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000]],
            [[-1.8546, -0.7194, -0.2918, -0.1846,  0.0000],
             [ 0.2773,  0.8793, -0.5183, -0.6447,  0.0000],
             [ 1.8009,  1.8468, -0.9832, -1.5272,  0.0000]]])
    >>> pt_large = torch.nested.to_padded_tensor(nt, 1.0, (2, 4, 6))
    tensor([[[ 1.6862, -1.1282,  1.1031,  0.0464, -1.3276,  1.0000],
             [-1.9967, -1.0054,  1.8972,  0.9174, -1.4995,  1.0000],
             [ 1.0000,  1.0000,  1.0000,  1.0000,  1.0000,  1.0000],
             [ 1.0000,  1.0000,  1.0000,  1.0000,  1.0000,  1.0000]],
            [[-1.8546, -0.7194, -0.2918, -0.1846,  1.0000,  1.0000],
             [ 0.2773,  0.8793, -0.5183, -0.6447,  1.0000,  1.0000],
             [ 1.8009,  1.8468, -0.9832, -1.5272,  1.0000,  1.0000],
             [ 1.0000,  1.0000,  1.0000,  1.0000,  1.0000,  1.0000]]])
    >>> pt_small = torch.nested.to_padded_tensor(nt, 2.0, (2, 2, 2))
    RuntimeError: Value in output_size is less than NestedTensor padded size. Truncation is not supported.

""",
)
def nested_tensor(tensor_list, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor:
    r"""
    Constructs a nested tensor with no autograd history (also known as a "leaf tensor", see
    :ref:`Autograd mechanics <autograd-mechanics>`) from :attr:`tensor_list`, a list of tensors.

    Args:
        tensor_list (List[array_like]): a list of tensors, or anything that can be passed to torch.tensor,
            where each element of the list has the same dimensionality.

    Keyword arguments:
        dtype (:class:`torch.dtype`, optional): the desired type of returned nested tensor.
            Default: if None, same :class:`torch.dtype` as leftmost tensor in the list.
        layout (:class:`torch.layout`, optional): the desired layout of returned nested tensor.
            Only strided and jagged layouts are supported. Default: if None, the strided layout.
        device (:class:`torch.device`, optional): the desired device of returned nested tensor.
            Default: if None, same :class:`torch.device` as leftmost tensor in the list.
        requires_grad (bool, optional): If autograd should record operations on the
            returned nested tensor. Default: ``False``.
        pin_memory (bool, optional): If set, the returned nested tensor is allocated in
            pinned memory. Works only for CPU tensors. Default: ``False``.

    Example::

        >>> a = torch.arange(3, dtype=torch.float, requires_grad=True)
        >>> b = torch.arange(5, dtype=torch.float, requires_grad=True)
        >>> nt = torch.nested.nested_tensor([a, b], requires_grad=True)
        >>> nt.is_leaf
        True
    """
    if layout is None:
        layout = torch.strided
    if layout == torch.strided:
        return _nested.nested_tensor(
            tensor_list,
            dtype=dtype,
            device=device,
            requires_grad=requires_grad,
            pin_memory=pin_memory)
    elif layout == torch.jagged:
        # Need to wrap lists of scalars as tensors
        list_of_tensors = [t if isinstance(t, Tensor) else torch.as_tensor(t) for t in tensor_list]

        from torch.nested._internal.nested_tensor import jagged_from_list

        with torch.no_grad():
            nt, _ = jagged_from_list(list_of_tensors, offsets=None, device=device, dtype=dtype)

        nt.requires_grad_(requires_grad)
        if pin_memory:
            nt = nt.pin_memory()  # type: ignore[assignment]

        return nt
    else:
        raise RuntimeError(f"Specified layout is unsupported for nested tensors: {layout}")
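
# A minimal sketch (not part of this file): constructing a jagged layout NT and
# recovering the constituents. The ragged dim of the shape prints as a nested int.
#
#   import torch
#   a, b = torch.randn(2, 4), torch.randn(3, 4)
#   nt = torch.nested.nested_tensor([a, b], layout=torch.jagged)
#   nt.shape                        # e.g. torch.Size([2, j1, 4])
#   torch.equal(nt.unbind()[1], b)  # True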
def narrow(tensor: Tensor, dim: int, start: Union[int, Tensor], length: Union[int, Tensor], layout=torch.strided) -> Tensor:
    r"""
    Constructs a nested tensor (which might be a view) from :attr:`tensor`, a strided tensor. This follows
    similar semantics to torch.Tensor.narrow, where in the :attr:`dim`-th dimension the new nested tensor
    shows only the elements in the interval `[start, start+length)`. As nested representations
    allow for a different `start` and `length` at each 'row' of that dimension, :attr:`start` and :attr:`length`
    can also be tensors of shape `tensor.shape[0]`.

    There are some differences depending on the layout you use for the nested tensor. If using the strided
    layout, torch.narrow will copy the narrowed data into a contiguous NT with the strided layout, while
    jagged layout narrow() will create a non-contiguous view of your original strided tensor. This particular
    representation is really useful for representing kv-caches in Transformer models, as specialized
    SDPA kernels can deal with this format easily, resulting in performance improvements.


    Args:
        tensor (:class:`torch.Tensor`): a strided tensor, which will be used as the underlying data
            for the nested tensor if using the jagged layout or will be copied for the strided layout.
        dim (int): the dimension where narrow will be applied. Only `dim=1` is supported for the
            jagged layout, while strided supports all dims.
        start (Union[int, :class:`torch.Tensor`]): starting element for the narrow operation
        length (Union[int, :class:`torch.Tensor`]): number of elements taken during the narrow op

    Keyword arguments:
        layout (:class:`torch.layout`, optional): the desired layout of returned nested tensor.
            Only strided and jagged layouts are supported. Default: if None, the strided layout.

    Example::

        >>> starts = torch.tensor([0, 1, 2, 3, 4], dtype=torch.int64)
        >>> lengths = torch.tensor([3, 2, 2, 1, 5], dtype=torch.int64)
        >>> narrow_base = torch.randn(5, 10, 20)
        >>> nt_narrowed = torch.nested.narrow(narrow_base, 1, starts, lengths, layout=torch.jagged)
        >>> nt_narrowed.is_contiguous()
        False
    """
    if not isinstance(start, (int, SymInt, Tensor)):
        raise RuntimeError("start must be an integer or a tensor")

    if not isinstance(length, (int, SymInt, Tensor)):
        raise RuntimeError("length must be an integer or a tensor")

    if layout == torch.strided:
        if isinstance(start, Tensor) or isinstance(length, Tensor):
            raise RuntimeError("start and length must be integers for the strided layout NT impl")
        # TODO: switch to as_nested_tensor(tensor) when it is available
        nt = as_nested_tensor(torch.unbind(tensor), layout=torch.strided).narrow(dim, start, length)
    elif layout == torch.jagged:
        if dim != 1:
            raise RuntimeError("jagged layout only supports dim=1")

        from torch.nested._internal.nested_tensor import jagged_from_tensor_and_lengths

        if isinstance(start, (int, SymInt)):
            start = torch.tensor([start], device=tensor.device, dtype=torch.int64)

        if isinstance(length, (int, SymInt)):
            length = torch.tensor([length], device=tensor.device, dtype=torch.int64)

        nt, _, _ = jagged_from_tensor_and_lengths(tensor, start, length)
    else:
        raise RuntimeError(f"Specified layout is unsupported for nested narrow: {layout}")

    return nt
llmeval-env/lib/python3.10/site-packages/torch/nested/_internal/__init__.py
ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/torch/nested/_internal/nested_tensor.py
ADDED
@@ -0,0 +1,431 @@
from typing import Tuple

import torch
from torch._C import DispatchKey, DispatchKeySet
from torch._prims_common import is_expandable_to
from torch.fx.experimental.symbolic_shapes import has_free_symbols
from torch.utils.weak import WeakTensorKeyDictionary
from typing import *  # noqa: F403

_tensor_id_counter = 0
_tensor_symint_registry = WeakTensorKeyDictionary()


def get_tensor_symint(tensor, *, coeff=1):
    global _tensor_id_counter
    tensor_symint = _tensor_symint_registry.get(tensor)
    if tensor_symint is None:
        tensor_symint = torch._C._get_nested_int(_tensor_id_counter, coeff)
        _tensor_id_counter += 1
        _tensor_symint_registry[tensor] = tensor_symint
    return tensor_symint


# SDPA metadata; max / min seqlens are needed for e.g. flash
def _get_sdpa_extreme_seqlen(func, tensor):
    return int(func(tensor).item())
class NestedTensor(torch.Tensor):
    _values: torch.Tensor  # type: ignore[assignment]
    _offsets: torch.Tensor
    _lengths: Optional[torch.Tensor]
    # NOTE [ Nested ints for ragged sizes and strides ]
    #
    # Jagged layout tensors are tensors that represent an n-dim tensor with a
    # ragged dimension, but are backed by an (n-1)-dim tensor underneath, e.g.,
    # a jagged tensor with outer shape [B, x, D] is represented internally by a
    # tensor with shape [sum(x), D], where we introduce what we call a nested int
    # denoted as "x" here (but sometimes denoted with "*") to represent the
    # ragged dimension, and sum(x) represents the dim of the inner tensor, or
    # equivalently the sum of all the sizes of the constituent tensors' varying
    # lengths.
    #
    # We also use nested ints to represent the strides of this tensor.
    # For example, a jagged tensor with shape [B, x, D] can be strided in two
    # ways: [xD, D, 1] and [x, 1, sum(x)], where xD represents x multiplied by D
    _size: Tuple[int, ...]
    _stride: Tuple[int, ...]
    # Indicates that the nth dimension is ragged
    _ragged_idx: int
    _metadata_cache: Dict[str, Any]

    @staticmethod
    def __new__(
        cls,
        values,
        offsets,
        *,
        lengths=None,
        **kwargs,
    ):
        ks = DispatchKeySet(DispatchKey.NestedTensor)
        ks = ks.add(DispatchKey.AutogradNestedTensor)
        r = torch.Tensor._make_wrapper_subclass(  # type: ignore[attr-defined]
            cls,
            (0,),
            (0,),
            0,
            torch.contiguous_format,
            values.dtype,
            torch.jagged,
            values.device,
            False,
            kwargs.get("requires_grad", False),
            "sizes",
            False,
            True,  # dispatch_layout
            ks,
        )
        return r

    def __init__(self, values, offsets, *, lengths=None, **kwargs):
        super().__init__()
        # Only support jagged for now.
        assert offsets is not None
        assert offsets.ndim == 1
        assert not isinstance(values, NestedTensor)

        # Query cache for the symint associated with offsets or lengths
        # (create a new one if needed).
        ragged_source = offsets if lengths is None else lengths
        ragged_size = get_tensor_symint(ragged_source, coeff=1)
        self._ragged_idx = kwargs.get("_ragged_idx", 1)
        B = offsets.shape[0] - 1
        if lengths is not None:
            assert B == lengths.shape[0]

        # subtract 1 to convert to values dim space
        r = self._ragged_idx - 1
        self._size = (B, *values.shape[:r], ragged_size, *values.shape[r + 1 :])
        stride = values.stride()
        self._strides = (ragged_size * stride[r], *stride)

        self._values = values
        self._offsets = offsets
        self._lengths = lengths

        # holds properties that are computed lazily
        self._metadata_cache = kwargs.get("_metadata_cache") or {}

        # collapsed ragged dim must always be dynamic
        torch._dynamo.mark_dynamic(self, self._ragged_idx)
        torch._dynamo.mark_dynamic(self._values, self._ragged_idx - 1)
    def values(self):
        # dispatch to get proper view relationship
        return torch._nested_get_values(self)  # type: ignore[return-value]

    def offsets(self):
        return self._offsets

    def lengths(self):
        return self._lengths

    @property
    def _max_seqlen(self):
        if "max_seqlen" not in self._metadata_cache:
            # compute & cache
            self._metadata_cache["max_seqlen"] = _get_sdpa_extreme_seqlen(
                torch.max,
                self._offsets.diff() if self._lengths is None else self._lengths,
            )
        return self._metadata_cache["max_seqlen"]

    @property
    def _min_seqlen(self):
        if "min_seqlen" not in self._metadata_cache:
            # compute & cache
            self._metadata_cache["min_seqlen"] = _get_sdpa_extreme_seqlen(
                torch.min,
                self._offsets.diff() if self._lengths is None else self._lengths,
            )
        return self._metadata_cache["min_seqlen"]

    def __repr__(self):
        # We should implement this in torch/_tensor_str.py instead
        grad_fn_str = (
            f", requires_grad={self.requires_grad}" if self.requires_grad else ""
        )
        if self.grad_fn:
            grad_fn_str = f", grad_fn={self.grad_fn}"
        return f"NestedTensor(size={self._size}, offsets={self._offsets}{grad_fn_str}, contiguous={self._lengths is None})"
    def __reduce_ex__(self, proto):
        state = torch._utils._get_obj_state(self)

        # SymNodes are not serializable
        assert "_size" in state and "_strides" in state
        state = dict(state)
        del state["_size"]
        del state["_strides"]

        func = NestedTensor
        args = (self._values, self._offsets)
        return (torch._tensor._rebuild_from_type_v2, (func, type(self), args, state))

    def __tensor_flatten__(self):
        ctx = {
            "requires_grad": self.requires_grad,
            # TODO: Don't guard on this!
            "metadata_cache": self._metadata_cache,
            "ragged_idx": self._ragged_idx,
        }
        inner_tensors = ["_values", "_offsets"]
        if self._lengths is not None:
            inner_tensors.append("_lengths")
        return inner_tensors, ctx

    @staticmethod
    def __tensor_unflatten__(inner_tensors: Dict, meta, outer_size, outer_stride):
        # inner tensors: _values, _offsets, [_lengths]
        assert len(inner_tensors) >= 2 and len(inner_tensors) <= 3
        values = inner_tensors["_values"]
        offsets = inner_tensors["_offsets"]
        lengths = inner_tensors.get("_lengths", None)
        ragged_idx = meta["ragged_idx"]

        # Note that we cannot simply check if is_fake(values) because
        # during aot autograd, FunctionalTensors are not fake but hold
        # symbolic sizes.
        ragged_source = offsets if lengths is None else lengths
        if has_free_symbols(ragged_source) or has_free_symbols(values):
            # Associate offsets or lengths (possibly fake, possibly functionalized)
            # with the ragged_size.
            ragged_size = outer_size[ragged_idx]
            _tensor_symint_registry[ragged_source] = ragged_size

        return NestedTensor(
            values,
            offsets=offsets,
            lengths=lengths,
            requires_grad=meta["requires_grad"],
            _ragged_idx=ragged_idx,
            _metadata_cache=meta["metadata_cache"],
        )

    @classmethod
    def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
        kwargs = {} if kwargs is None else kwargs

        # Lazy import to avoid circular dependency
        from .ops import lookup_jagged

        fn = lookup_jagged(func, *args, **kwargs)
        if fn is not None:
            return fn(*args, **kwargs)

        raise NotImplementedError(func)

    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        if kwargs is None:
            kwargs = {}

        from .ops import jagged_torch_function

        try:
            return jagged_torch_function(func, *args, **kwargs)
        except NotImplementedError:
            pass
        with torch._C.DisableTorchFunctionSubclass():
            return func(*args, **kwargs)
# NB: These fake view autograd.Functions are superseded by real view ops. Don't use them!
# TODO: Remove ViewBufferFromNested, ViewNestedFromBuffer, and buffer_from_jagged once the
# internal BC period has passed.


# Not actually a view!
class ViewBufferFromNested(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x: NestedTensor):  # type: ignore[override]
        ctx.save_for_backward(x.offsets())
        ctx.metadata_cache = x._metadata_cache
        ctx.ragged_idx = x._ragged_idx
        return x._values

    @staticmethod
    def backward(ctx, gO: torch.Tensor):  # type: ignore[override]
        (offsets,) = ctx.saved_tensors
        return NestedTensor(
            gO,
            offsets=offsets,
            _metadata_cache=ctx.metadata_cache,
            _ragged_idx=ctx.ragged_idx,
        )


# Not actually a view!
class ViewNestedFromBuffer(torch.autograd.Function):
    @staticmethod
    def forward(
        ctx,
        values: torch.Tensor,
        offsets: torch.Tensor,
        metadata_cache: Optional[Dict[str, Any]] = None,
    ):  # type: ignore[override]
        return NestedTensor(
            values.detach(),
            offsets=offsets,
            _metadata_cache=metadata_cache,
        )

    @staticmethod
    def backward(ctx, gO: NestedTensor):  # type: ignore[override]
        return gO._values, None, None


def buffer_from_jagged(jagged):
    return ViewBufferFromNested.apply(jagged)
# Need to make it obvious that users should be passing in offsets
def jagged_from_list(
    tensors: List[torch.Tensor],
    offsets: Optional[torch.Tensor],
    dtype=None,
    device=None,
) -> Tuple[NestedTensor, torch.Tensor]:
    """Constructs a NestedTensor backed by jagged layout from a list of tensors"""

    if not len(set(t.dtype for t in tensors)) == 1:  # noqa: C401
        raise RuntimeError(
            "When constructing a nested tensor, all tensors in list must have the same dtype"
        )
    if not len(set(t.device for t in tensors)) == 1:  # noqa: C401
        raise RuntimeError(
            "When constructing a nested tensor, all tensors in list must be on the same device"
        )

    # Check that the NT is representable by the jagged layout.
    # Jagged layout represents (B, *, D_0, D_1, ..., D_N), where the only
    # raggedness allowed is for the single dim immediately adjacent to the batch dim.
    sizes = [t.shape for t in tensors]
    non_first_sizes = [s[1:] for s in sizes]
    at_most_first_ragged = all(s == non_first_sizes[0] for s in non_first_sizes)
    if not at_most_first_ragged:
        raise RuntimeError(
            "Cannot represent given tensor list as a nested tensor with the jagged layout. "
            "Note that the jagged layout only represents shapes of the form "
            "(B, *, D_0, D_1, ..., D_N), with only * allowed to be ragged."
        )

    # Set properties appropriately.
    values = torch.cat(tensors, dim=0)
    to_kwargs = {}
    if device is not None:
        to_kwargs["device"] = device
    if dtype is not None:
        to_kwargs["dtype"] = dtype
    values = values.to(**to_kwargs)

    # Calculate jagged offsets if not provided.
    if offsets is None:
        # Jagged layout specifies that offsets are stored as int64 on the same device as values.
        # TODO: An alternative way to construct offsets is to use F.pad. This avoids creating
        # an extra leaf tensor during the forward, potentially resolving compatibility issues.
        offsets = torch.cat(
            [
                torch.zeros(1, dtype=torch.int64, device=values.device),
                torch.tensor([s[0] for s in sizes], device=values.device).cumsum(dim=0),
            ]
        )

    ret_nt = nested_view_from_values_offsets(values, offsets)
    ret_nt._metadata_cache = {
        # compute this now since it's easy
        "max_seqlen": max([t.shape[0] for t in tensors]),
        "min_seqlen": min([t.shape[0] for t in tensors]),
    }
    return (ret_nt, offsets)  # type: ignore[return-value]
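
# A minimal sketch (not part of this file) of the [B, x, D] <-> [sum(x), D]
# relationship: jagged_from_list concatenates the constituents into one values
# buffer and records the row boundaries in offsets.
#
#   import torch
#   from torch.nested._internal.nested_tensor import jagged_from_list
#
#   nt, offsets = jagged_from_list([torch.randn(2, 3), torch.randn(4, 3)], offsets=None)
#   nt._values.shape  # torch.Size([6, 3]) -- sum(x) = 2 + 4 rows
#   offsets           # tensor([0, 2, 6]); batch row i spans values[offsets[i]:offsets[i+1]]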
def jagged_from_tensor_and_lengths(
    tensor: torch.Tensor, starts: torch.Tensor, lengths: torch.Tensor
) -> Tuple[NestedTensor, torch.Tensor, Optional[torch.Tensor]]:
    """Constructs a NestedTensor backed by jagged layout from a tensor, starts of sequences, and sequence lengths"""
    batch_size = tensor.shape[0]
    if is_expandable_to(starts.shape, (batch_size,)) and is_expandable_to(
        lengths.shape, (batch_size,)
    ):
        start_list = starts.expand(batch_size)
        length_list = lengths.expand(batch_size)
    else:
        raise RuntimeError(
            "When constructing a jagged nested tensor using narrow(), "
            "your start and length must be Tensors that broadcast to input.shape[0]"
        )

    # Calculate jagged offsets
    assert (
        len(tensor.shape) >= 2
    ), "tensor must at least be 2D for the nested narrow op to work"
    max_seq_len = tensor.shape[1]
    offset_lengths = max_seq_len * torch.arange(
        0, batch_size, dtype=torch.int64, device=tensor.device
    )
    # Jagged layout specifies that offsets are stored as int64 on the same device as values.
    offsets = torch.cat(
        [
            start_list + offset_lengths,
            (start_list[-1] + offset_lengths[-1] + length_list[-1]).unsqueeze(0),
        ]
    )

    # Reshape buffer to flatten the 1st and 2nd dimension (view used to enforce non-copy)
    if len(tensor.shape) > 2:
        values = tensor.view(-1, *tensor.shape[2:])
    else:
        values = tensor.view(-1)

    # Check if offsets and lengths make it possibly contiguous and return a regular NT
    is_contiguous = True
    orig_dim = tensor.shape[1]
    if torch.any(length_list[1:-1].ne(orig_dim)):
        is_contiguous = False
    if torch.any(offsets[1:-2].diff().ne(orig_dim)):
        is_contiguous = False
    if offsets[0] + length_list[0] != orig_dim:
        is_contiguous = False

    actual_max_seqlen = int(torch.max(lengths).item())
    min_seqlen = int(torch.min(lengths).item())

    if is_contiguous:
        ret_nt = nested_view_from_values_offsets(
            values[offsets[0] : offsets[-1]], offsets - offsets[0]
        )
    else:
        ret_nt = nested_view_from_values_offsets_lengths(values, offsets, length_list)

    # populate metadata cache with computed seqlen extremes
    ret_nt._metadata_cache = {
        "max_seqlen": actual_max_seqlen,
        "min_seqlen": min_seqlen,
    }

    return (ret_nt, offsets, None if is_contiguous else length_list)
# NB: A dummy arg is required so that NestedTensor.__torch_dispatch__() is invoked
# for _nested_view_from_values_offsets(). Sizes don't matter much, but they shouldn't be
# 0/1 because the dummy can be fake-ified and we want to avoid specializing.
# This arg is otherwise unused.
_nt_view_dummy = NestedTensor(
    values=torch.randn(3, 3, device="meta"),
    offsets=torch.randint(3, (2,), device="meta", dtype=torch.int64),
).detach()


def nested_view_from_values_offsets(values, offsets, ragged_idx=1):
    return torch._nested_view_from_jagged(
        values, offsets, _nt_view_dummy, None, ragged_idx
    )  # type: ignore[return-value]


def nested_view_from_values_offsets_lengths(values, offsets, lengths, ragged_idx=1):
    return torch._nested_view_from_jagged(
        values, offsets, _nt_view_dummy, lengths, ragged_idx
    )  # type: ignore[return-value]
llmeval-env/lib/python3.10/site-packages/torch/nested/_internal/ops.py
ADDED
@@ -0,0 +1,1120 @@
import functools
import math
import operator

import torch
from torch.nested._internal.sdpa import jagged_scaled_dot_product_attention

from .nested_tensor import NestedTensor
from typing import *  # noqa: F403
import torch.nn.functional as F
from torch.fx.operator_schemas import normalize_function

__all__: List[Any] = []

JAGGED_OPS_TABLE: Dict[Any, Any] = {}
# Simplifying assumption: we assume that the batch dim is always the left-most
# dim, and the ragged dim is always the second dim.
def _outer_to_inner_dim(ndim, dim):
    assert dim >= 0 and dim < ndim
    return 0 if dim < 2 else dim - 1


def _wrap_jagged_dim(
    ndim, dim, op_name, convert_to_inner_dim=True, allow_batch_dim=False
):
    from torch._prims_common import canonicalize_dims

    wrapped = canonicalize_dims(ndim, dim)
    if wrapped == 1:
        raise RuntimeError(f"{op_name}(): not supported for NestedTensor on dim=1")
    elif wrapped == 0 and not allow_batch_dim:
        raise RuntimeError(f"{op_name}(): not supported for NestedTensor on dim=0")
    return _outer_to_inner_dim(ndim, wrapped) if convert_to_inner_dim else wrapped
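
# A minimal sketch (not part of this file) of the outer -> inner dim mapping for
# a 3-D jagged NT with outer shape (B, j0, D) backed by values of shape (sum(j0), D):
# the batch and ragged dims both collapse onto inner dim 0, and every later dim
# shifts left by one.
#
#   def _demo_outer_to_inner_dim(ndim, dim):
#       return 0 if dim < 2 else dim - 1
#
#   [_demo_outer_to_inner_dim(3, d) for d in range(3)]  # [0, 0, 1]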
def _wrap_jagged_dims(ndim, dims, op_name):
    # ex: (2, 3, 4) -> (1, 2, 3)
    # ex: (0, 1, 4) -> (0, 3)
    from torch._prims_common import canonicalize_dims

    wrapped_dims = [canonicalize_dims(ndim, d) for d in dims]
    # This logic needs to be done after we canonicalize dims but before we
    # map to inner dims so we can print a nicer error message.
    zero_in_dims = 0 in wrapped_dims
    one_in_dims = 1 in wrapped_dims
    if zero_in_dims ^ one_in_dims:
        apply, not_apply = ("batch", "ragged") if zero_in_dims else ("ragged", "batch")
        raise RuntimeError(
            f"{op_name}(): applying over the {apply} dimension, but not the {not_apply}"
            " dimension is not supported for NestedTensor"
        )
    return (
        tuple(_outer_to_inner_dim(ndim, d) for d in dims if d != 0),
        zero_in_dims,
    )
def check_schema(schema_str: str, func, *args, **kwargs) -> None:
    named_arg_types = schema_str.split(", ")
    num_optional_args = sum([x.endswith("?") for x in named_arg_types])
    min_args = len(named_arg_types) - num_optional_args

    # special case: ellipses allows for any number of unchecked args at the end
    if named_arg_types[-1] == "...":
        named_arg_types = named_arg_types[:-1]
    else:
        if not (len(args) >= min_args and len(args) <= len(named_arg_types)):
            raise ValueError(
                f"NestedTensor {func.__name__}({schema_str}): expected at least {min_args} "
                f"arguments and at most {len(named_arg_types)} arguments, but got: "
                f"{len(args)} arguments"
            )

    arg_type_check_fns = {
        "t": lambda x: isinstance(x, torch.Tensor) and not isinstance(x, NestedTensor),
        "jt": lambda x: isinstance(x, NestedTensor)
        and x._lengths is None
        and x._ragged_idx == 1,  # ops with "jt" require contiguous JT only
        "jt_all": lambda x: isinstance(
            x, NestedTensor
        ),  # ops with "jt_all" can accept all kinds of JT
        "any": lambda x: True,
    }
    for i, named_arg_type in enumerate(named_arg_types):
        name, arg_type = named_arg_type.split(": ")
        is_optional = arg_type.endswith("?")
        normalized_arg_type = arg_type[:-1] if is_optional else arg_type
        if normalized_arg_type not in arg_type_check_fns.keys():
            raise AssertionError(f"Unknown arg type: {normalized_arg_type}")

        if i >= len(args):
            if not is_optional:
                raise ValueError(
                    f"NestedTensor {func.__name__}({schema_str}) "
                    f"missing required argument: {name}"
                )
            continue

        _check_fn = arg_type_check_fns[normalized_arg_type]

        def check_fn(x, is_optional=is_optional):
            if is_optional:
                return x is None or _check_fn(x)
            else:
                return _check_fn(x)

        if not check_fn(args[i]):
            type_to_desc = {
                "t": "tensor",
                "t?": "optional tensor",
                "jt": "contiguous jagged layout NestedTensor",
                "jt_all": "jagged layout NestedTensor",
                "any": "<any type>",
            }

            raise ValueError(
                f"NestedTensor {func.__name__}({schema_str}): expected {name} to be a "
                f"{type_to_desc[arg_type]}"
            )
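
# A minimal sketch (not part of this file) of the schema mini-language consumed
# by check_schema(): entries are "name: type" with type in {t, jt, jt_all, any},
# a trailing "?" marks an optional argument, and "..." accepts any extra args.
#
#   import torch
#   from torch.nested._internal.ops import check_schema
#
#   nt = torch.nested.nested_tensor(
#       [torch.randn(2, 3), torch.randn(4, 3)], layout=torch.jagged
#   )
#   check_schema("self: jt_all, dim: any, keepdim: any?", torch.sum, nt, 1)  # passes
#   # check_schema("self: jt_all, dim: any", torch.sum, torch.randn(3), 1)   # would raise ValueError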
123 |
+
|
124 |
+
def check_ragged_dim_same(
|
125 |
+
func, a: NestedTensor, a_name: str, b: NestedTensor, b_name: str
|
126 |
+
) -> None:
|
127 |
+
# Calling into .shape here
|
128 |
+
if a._size[a._ragged_idx] != b._size[b._ragged_idx]:
|
129 |
+
raise RuntimeError(
|
130 |
+
f"NestedTensor {func.__name__}: expected {a_name} and {b_name} to have the "
|
131 |
+
"same exact offsets tensor."
|
132 |
+
)
|
133 |
+
|
134 |
+
|
135 |
+
# returns True if the raggedness-relevant portions of the NT shape
|
136 |
+
# match those of the specified size
|
137 |
+
def raggedness_matches(nt, size):
|
138 |
+
end = nt._ragged_idx + 1
|
139 |
+
nt_ragged = nt._size[:end]
|
140 |
+
size_ragged = size[:end]
|
141 |
+
return len(nt_ragged) == len(size_ragged) and (
|
142 |
+
all(ns == s or s == -1 for ns, s in zip(nt_ragged, size_ragged))
|
143 |
+
)


def squeeze_leading_ones(t):
    # Note: [ Squeezing leading ones ]
    #
    # Squeeze leading ones from t.
    #
    # We want:
    #   (B, j0, ?, ?) + (1, 1, ?, ?) -> (B, j0, ?, ?)
    #   (B, j0, ?, ?) + (1, 1, 1, ?, ?) -> (1, B, j0, ?, ?)  (not yet supported)
    #
    # 1) Squeeze extra ones and grab values from NT
    #   (1, 1, ?, ?) -> (?, ?)  and  (sum(*), ?, ?) -> (B, j0, ?, ?)
    # 2) Do dense broadcasting:
    #   (sum(*), ?, ?) + (?, ?) -> (sum(*), ?, ?)
    # 3) Construct nested tensor
    #   (sum(*), ?, ?) -> (B, j0, ?, ?)
    #
    # If unsqueezing on the 0th dim becomes supported, we would unsqueeze
    # at step (4) and we would need to update this function to record how
    # many ones we unsqueezed.
    while t.shape[0] == 1:
        t = t.squeeze(0)
    return t
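

# Minimal sketch (not part of the original file): leading size-1 dims are
# stripped so dense broadcasting lines up against the NT's values buffer.
def _example_squeeze_leading_ones():
    t = torch.randn(1, 1, 5, 3)
    print(squeeze_leading_ones(t).shape)  # torch.Size([5, 3])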


def register_func(tables, aten_ops, schema_str):
    if not isinstance(aten_ops, list):
        aten_ops = [aten_ops]
    if not isinstance(tables, list):
        tables = [tables]

    def wrapper(func):
        for aten_op in aten_ops:

            def get_inner(aten_op):
                def inner(*args, **kwargs):
                    check_schema(schema_str, func, *args, **kwargs)
                    return func(aten_op, *args, **kwargs)

                return inner

            for table in tables:
                table[aten_op] = get_inner(aten_op)
        return func

    return wrapper


register_jagged_func = functools.partial(register_func, JAGGED_OPS_TABLE)
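

# Illustrative sketch (not part of the original file): what a registration
# looks like from the caller's side. `_EXAMPLE_TABLE` is a scratch table used
# here instead of JAGGED_OPS_TABLE so this demo doesn't shadow any real
# handler; the decorator validates args against the schema string and then
# calls the wrapped function with the concrete aten op prepended.
_EXAMPLE_TABLE: dict = {}


@register_func(_EXAMPLE_TABLE, torch.ops.aten.clone.default, "self: jt_all")
def _example_clone(func, *args, **kwargs):
    inp = args[0]
    return NestedTensor(func(inp._values), **extract_kwargs(inp))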


def lookup_jagged(func, *args, **kwargs) -> Optional[Callable]:
    dispatch_func = JAGGED_OPS_TABLE.get(func, None)
    if dispatch_func is not None:
        return dispatch_func

    # Handle pointwise fallbacks
    if torch.Tag.pointwise in func.tags:
        # Assume there aren't additional tensors that aren't the "unary/binary" args
        num_tensor_args = sum([isinstance(x, torch.Tensor) for x in args])
        if num_tensor_args == 1:
            check_schema("self: jt_all, ...", func, *args, **kwargs)
            return functools.partial(jagged_unary_pointwise, func)
        elif num_tensor_args == 2:
            check_schema("lhs: any, rhs: any, ...", func, *args, **kwargs)
            return functools.partial(jagged_binary_pointwise, func)

    return None


def extract_kwargs(arg):
    kwargs = {
        "offsets": arg.offsets(),
        "_metadata_cache": arg._metadata_cache,
        "_ragged_idx": arg._ragged_idx,
    }
    return kwargs


def jagged_unary_pointwise(func, *args, **kwargs):
    return NestedTensor(
        func(args[0]._values, *args[1:], **kwargs), **extract_kwargs(args[0])
    )


def jagged_binary_pointwise(func, *args, **kwargs):
    a, b = args[0], args[1]
    assert isinstance(a, NestedTensor) or isinstance(b, NestedTensor)

    mismatch_error_msg = (
        "cannot call binary pointwise function {} with inputs of shapes {} and {}"
    )
    # a is NT, b is NT
    if isinstance(a, NestedTensor) and isinstance(b, NestedTensor):
        # ex: (B, j0, D) + (B, j0, D)
        # ex: (B, j0, D) + (B, j0, 1)
        if raggedness_matches(a, b._size):
            return NestedTensor(
                func(a._values, b._values, *args[2:], **kwargs), **extract_kwargs(a)
            )
        raise RuntimeError(mismatch_error_msg.format(func.__name__, a._size, b._size))
    # either a is NT or b is NT at this point
    a_is_nt = isinstance(a, NestedTensor)
    extracted_kwargs = extract_kwargs(a) if a_is_nt else extract_kwargs(b)

    # === Handle broadcasting across the batch / ragged dims ===

    # Easy case: take advantage of pre-existing broadcasting logic
    # ex: (B, j0, ?, ?) + (?) -> (B, j0, ?, ?)
    # ex: (B, j0, ?, ?) + (?, ?) -> (B, j0, ?, ?)
    # ex: (B, j0, ?, ?) + (1, 1, ?, ?) -> (B, j0, ?, ?)
    nt, t = (a, b) if a_is_nt else (b, a)
    # See Note: [ Squeezing leading ones ]
    if t.dim() > nt.dim():
        raise NotImplementedError("NYI: broadcasting NT with T with larger dim")
    t_squeezed = squeeze_leading_ones(t)
    if nt.dim() >= t_squeezed.dim() + 2:
        lhs, rhs = (nt._values, t_squeezed) if a_is_nt else (t_squeezed, nt._values)
        return NestedTensor(func(lhs, rhs, *args[2:], **kwargs), **extracted_kwargs)

    # Harder case: do manual broadcasting over unbound components
    # when NT dim == non-NT dim
    # ex: (B, j0, D_0, D_1) + (B, 1, D_0, D_1) -> (B, j0, D_0, D_1)
    if a.dim() == b.dim():
        # ex: (B, j0, D_0, D_1) + (1, 1, D_0, D_1) -> should
        # be (B, j0, D_0, D_1) but not yet supported
        if a.shape[0] != b.shape[0]:
            raise RuntimeError(
                mismatch_error_msg.format(func.__name__, a.shape, b.shape)
            )

        # need to use offsets to broadcast across ragged dim properly
        # NB: inefficient fallback here; Triton codegen can help this
        # TODO: Make this work with autograd
        outputs = []
        for a_comp, b_comp in zip(a.unbind(), b.unbind()):
            outputs.append(func(a_comp, b_comp, *args[2:], **kwargs))
        new_values = torch.cat(outputs, dim=0)
        return NestedTensor(new_values, **extracted_kwargs)

    # ex: (B, j0, D_0, D_1) + (A, B, 1, D_0, D_1) -> error because this breaks the invariant
    # that ragged dim is wrt left-most batch dim
    raise RuntimeError(mismatch_error_msg.format(func.__name__, a.shape, b.shape))
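

# Minimal usage sketch (not part of the original file): NT + dense hits the
# "easy" broadcasting path above, which reduces to a plain op on the packed
# values buffer.
def _example_jagged_binary_pointwise():
    nt = torch.nested.nested_tensor(
        [torch.randn(2, 4), torch.randn(3, 4)], layout=torch.jagged
    )
    bias = torch.randn(4)
    out = nt + bias  # dispatches via the pointwise fallback in lookup_jagged
    print(out.shape)  # (2, j0, 4)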


def jagged_torch_function(func, *args, **kwargs):
    # SDPA has special kernels that handle nested tensors.
    # Dispatch to the correct implementation here
    if func is torch._C._nn.scaled_dot_product_attention:
        return jagged_scaled_dot_product_attention(*args, **kwargs)

    # Handle flatten() here because it's CompositeImplicit.
    if func.__name__ == "flatten":

        def _flatten_sig(input, start_dim=0, end_dim=-1):
            pass

        _, new_kwargs = normalize_function(
            _flatten_sig, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
        )

        inp = new_kwargs.pop("input")

        # NB: stay in outer dim space because we're going to redispatch on a NT input
        start_dim = _wrap_jagged_dim(
            inp.dim(), new_kwargs["start_dim"], "flatten", convert_to_inner_dim=False
        )
        end_dim = _wrap_jagged_dim(
            inp.dim(), new_kwargs["end_dim"], "flatten", convert_to_inner_dim=False
        )

        if start_dim == end_dim:
            return inp

        product = functools.reduce(operator.mul, inp.shape[start_dim : end_dim + 1])
        new_shape = (*inp.shape[:start_dim], product, *inp.shape[end_dim + 1 :])

        return inp.reshape(*new_shape)

    raise NotImplementedError(func)


@register_jagged_func(
    [
        torch.ops.aten.is_non_overlapping_and_dense.default,
        torch.ops.aten.sym_size.default,
        torch.ops.aten.dim.default,
        torch.ops.aten.sym_numel.default,
        torch.ops.aten.sym_stride.default,
        torch.ops.aten.sym_storage_offset.default,
    ],
    "self: jt_all",
)
def tensor_attr_supported_getter(func, *args, **kwargs):
    if func == torch.ops.aten.is_non_overlapping_and_dense.default:
        return False

    if func == torch.ops.aten.sym_size.default:
        return args[0]._size

    if func == torch.ops.aten.dim.default:
        return len(args[0]._size)

    if func == torch.ops.aten.sym_numel.default:
        if args[0]._lengths is not None:
            return int(sum(args[0]._lengths) * math.prod(args[0]._size[2:]))
        return args[0]._values.numel()

    if func == torch.ops.aten.sym_stride.default:
        return args[0]._strides

    if func == torch.ops.aten.sym_storage_offset.default:
        return args[0]._values.storage_offset()


@register_jagged_func(torch.ops.prim.layout.default, "self: jt_all")
def prim_layout_default(func, *args, **kwargs):
    return torch.jagged


@register_jagged_func(
    [torch.ops.aten.size.default],
    "self: jt_all",
)
def tensor_attr_unsupported_getter(func, *args, **kwargs):
    if func == torch.ops.aten.size.default:
        raise RuntimeError(
            "NestedTensors does not support directly calling torch.ops.aten.size "
            "please use `nested_tensor.size()` instead."
        )


@register_jagged_func(torch.ops.aten.is_contiguous.default, "self: jt_all")
def is_contiguous_general(func, *args, **kwargs):
    from torch._prims_common import is_contiguous_for_memory_format

    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    inp = new_kwargs.pop("input")

    # If created from narrow() check for lengths
    if inp.lengths() is not None:
        return False

    new_kwargs["memory_format"] = new_kwargs.get(
        "memory_format", torch.contiguous_format
    )
    if new_kwargs["memory_format"] == torch.preserve_format:
        return True
    return is_contiguous_for_memory_format(inp._values, **new_kwargs)


register_jagged_func(
    torch.ops.aten.is_contiguous.memory_format, "self: jt_all, memory_format: any?"
)(is_contiguous_general)


@register_jagged_func(torch.ops.aten.linear.default, "input: jt, weight: t, bias: t?")
def linear_default(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")

    return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))


@register_jagged_func(
    torch.ops.aten.linear_backward.default,
    "self: jt, grad_output: jt, weight: t, output_mask: any",
)
def linear_backward_default(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")
    grad_output = new_kwargs.pop("grad_output")
    weight = new_kwargs.pop("weight")

    check_ragged_dim_same(func, inp, "self", grad_output, "grad_output")
    ds = NestedTensor(
        torch.mm(grad_output._values, weight), **extract_kwargs(grad_output)
    )
    dw = torch.mm(grad_output._values.T, inp._values)
    db = None  # NYI: gradient for bias, need to reduce over ragged dim
    return (ds, dw, db)


@register_jagged_func(torch.ops.aten._to_copy.default, "self: jt_all")
def to_copy_default(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")
    # don't change layout
    new_kwargs.pop("layout")

    new_values = func(inp._values, **new_kwargs)
    # NB: Purposefully keep offsets on the old device.
    return NestedTensor(new_values, **extract_kwargs(inp))


register_jagged_func(
    [
        torch.ops.aten.empty_like.default,
        torch.ops.aten.ones_like.default,
        torch.ops.aten.zeros_like.default,
        torch.ops.aten.randn_like.default,
        torch.ops.aten.detach.default,
    ],
    "self: jt_all",
)(jagged_unary_pointwise)


register_jagged_func(
    torch.ops.aten._softmax.default, "self: jt, dim: any, half_to_float: any"
)(jagged_unary_pointwise)


@register_jagged_func(
    torch.ops.aten.native_dropout.default, "self: jt, float: any, train: any?"
)
def native_dropout_default(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")
    out1, out2 = func(inp._values, **new_kwargs)
    return (
        NestedTensor(out1, **extract_kwargs(inp)),
        NestedTensor(out2, **extract_kwargs(inp)),
    )


@register_jagged_func(
    torch.ops.aten.native_dropout_backward.default,
    "grad_output: jt, mask: jt, scale: any",
)
def native_dropout_backward_default(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    grad_output = new_kwargs.pop("grad_output")
    mask = new_kwargs.pop("mask")
    return NestedTensor(
        func(grad_output._values, mask._values, **new_kwargs),
        **extract_kwargs(grad_output),
    )


@register_jagged_func(torch.ops.aten.prod.dim_int, "self: jt, dim: any, keepdim: any?")
def prod_dim_int(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")
    # TODO: Figure out how to handle this better
    # keep_dim is required to keep it in jagged format
    if not new_kwargs["keepdim"]:
        raise RuntimeError("prod(): keepdim=True must be set for NestedTensor")
    dim = new_kwargs["dim"]
    new_kwargs["dim"] = _wrap_jagged_dim(len(inp._size), dim, "prod")

    return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(args[0]))


@register_jagged_func(
    torch.ops.aten.split.Tensor, "self: jt, split_size: any, dim: any"
)
def split_tensor(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")

    new_kwargs["dim"] = _wrap_jagged_dim(inp.dim(), new_kwargs["dim"], "split")

    return tuple(
        NestedTensor(values=x, **extract_kwargs(inp))
        for x in func(inp._values, **new_kwargs)
    )


@register_jagged_func(
    torch.ops.aten.split_with_sizes.default, "self: jt, split_sizes: any, dim: any"
)
def split_with_sizes_default(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")

    new_kwargs["dim"] = _wrap_jagged_dim(
        inp.dim(), new_kwargs["dim"], "split_with_sizes"
    )

    return [
        NestedTensor(values=x, **extract_kwargs(inp))
        for x in func(inp._values, **new_kwargs)
    ]


@register_jagged_func(torch.ops.aten.chunk.default, "self: jt, chunks: any, dim: any?")
def chunk_default(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")

    new_kwargs["dim"] = _wrap_jagged_dim(
        inp.dim(), new_kwargs["dim"], "chunk", allow_batch_dim=True
    )

    if new_kwargs["dim"] == 0:
        chunks = new_kwargs["chunks"]
        dim0_size = inp._size[0]
        chunk_size = math.ceil(dim0_size / chunks)

        # get _offsets of the chunks
        lengths = inp._offsets.diff()
        chunked_lengths = lengths.chunk(chunks)
        chunked_offsets = [torch.cumsum(x, dim=0) for x in chunked_lengths]
        chunked_offsets = [F.pad(x, (1, 0), value=0) for x in chunked_offsets]
        nested_kwargs = [
            {"offsets": per_offsets, "_ragged_idx": inp._ragged_idx}
            for per_offsets in chunked_offsets
        ]

        # get _values of the chunks
        split_sizes = [x.sum().item() for x in chunked_lengths]
        chunk_values = inp._values.split(split_sizes)

        # NB: iterate over the number of chunks actually produced; tensor.chunk()
        # may return fewer than `chunks` pieces, so iterating over `chunk_size`
        # (the per-chunk size) would drop chunks
        return [
            NestedTensor(values=chunk_values[i], **(nested_kwargs[i]))
            for i in range(0, len(chunk_values))
        ]
    else:
        return [
            NestedTensor(values=x, **extract_kwargs(inp))
            for x in func(inp._values, **new_kwargs)
        ]
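

# Worked sketch (not part of the original file): how batch-dim chunking
# rebuilds per-chunk offsets from the global offsets tensor.
def _example_chunk_offsets():
    offsets = torch.tensor([0, 2, 5, 9, 10])  # 4 components packed together
    lengths = offsets.diff()                  # tensor([2, 3, 4, 1])
    chunked_lengths = lengths.chunk(2)        # (tensor([2, 3]), tensor([4, 1]))
    per_chunk_offsets = [
        F.pad(torch.cumsum(x, dim=0), (1, 0), value=0) for x in chunked_lengths
    ]
    print(per_chunk_offsets)  # [tensor([0, 2, 5]), tensor([0, 4, 5])]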


@register_jagged_func(torch.ops.aten.unbind.int, "self: jt_all, dim: any?")
def unbind_int(func, *args, **kwargs):
    # Note that this specializes on the length of the offsets
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    dim = new_kwargs["dim"]
    if dim != 0:
        raise RuntimeError("unbind(): only supported for NestedTensor on dim=0")

    inp = new_kwargs.pop("input")
    values = inp.values()
    offsets = inp.offsets()
    lengths = inp.lengths()

    if inp._ragged_idx != 1:
        raise RuntimeError(
            "unbind(): only supported for NestedTensor when jagged dimension is 1"
        )

    if lengths is None:
        return torch.split(values, offsets.diff().tolist())
    return [
        values[offsets[i] : (offsets[i] + lengths[i])] for i in range(lengths.shape[0])
    ]
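

# Minimal sketch (not part of the original file): unbind recovers the
# original variable-length components by slicing the packed values buffer
# at the offsets.
def _example_unbind():
    components = [torch.randn(2, 4), torch.randn(3, 4)]
    nt = torch.nested.nested_tensor(components, layout=torch.jagged)
    for original, unbound in zip(components, nt.unbind()):
        assert torch.equal(original, unbound)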


@register_jagged_func(torch.ops.aten.squeeze.dim, "self: jt, dim: any")
def squeeze_dim(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")
    values = inp._values

    new_kwargs["dim"] = _wrap_jagged_dim(len(inp._size), new_kwargs["dim"], "squeeze")
    return NestedTensor(func(values, **new_kwargs), **extract_kwargs(inp))


@register_jagged_func(torch.ops.aten.unsqueeze.default, "self: jt, dim: any")
def unsqueeze_default(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")
    values = inp._values

    # Account for collapsed jagged dim
    dim = new_kwargs["dim"]
    new_kwargs["dim"] = _wrap_jagged_dim(len(inp._size) + 1, dim, "unsqueeze")
    return NestedTensor(func(values, **new_kwargs), **extract_kwargs(inp))


@register_jagged_func(torch.ops.aten.cat.default, "tensors: any, dim: any")
def cat_default(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    tensors = new_kwargs.pop("tensors")

    # Convert any non-nested to nested
    nested = [t for t in tensors if t.is_nested]
    assert len(nested) > 0
    first = nested[0]
    tensors = [t if t.is_nested else t.expand_as(first) for t in tensors]

    # Account for collapsed jagged dim
    dim = new_kwargs["dim"]
    new_kwargs["dim"] = _wrap_jagged_dim(len(first.shape), dim, "cat")

    return NestedTensor(
        func([t._values for t in tensors], **new_kwargs), **extract_kwargs(tensors[0])
    )


@register_jagged_func(torch.ops.aten.matmul.default, "self: jt, other: any")
def matmul_default(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")
    other = new_kwargs.pop("other")

    if inp.is_nested and not other.is_nested:
        return NestedTensor(
            func(inp._values, other, **new_kwargs), **extract_kwargs(inp)
        )
    elif inp.is_nested and other.is_nested:
        # BMM with equivalent ragged dims between the two inputs
        if inp.dim() > 3 and other.dim() > 3 and raggedness_matches(inp, other._size):
            return NestedTensor(func(inp._values, other._values), **extract_kwargs(inp))

    raise RuntimeError(
        f"matmul(): not supported between inputs of shapes {inp._size} and {other.shape}"
    )


@register_jagged_func(
    torch.ops.aten.expand.default, "self: jt, size: any, implicit: any?"
)
def expand_default(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")
    size = new_kwargs["size"]

    assert ("implicit" not in new_kwargs) or (not new_kwargs.pop("implicit"))
    if not raggedness_matches(inp, size):
        raise RuntimeError(f"expand(): cannot expand shape {inp._size} -> {size}")

    expand_arg = [-1, *size[2:]]
    return NestedTensor(func(inp._values, expand_arg), **extract_kwargs(inp))


@register_jagged_func(torch.ops.aten.expand_as.default, "self: t, other: jt")
def expand_as_default(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")
    other = new_kwargs.pop("other")

    return NestedTensor(func(inp, other._values), **extract_kwargs(other))


@register_jagged_func(torch.ops.aten.where.self, "condition: jt, self: jt, other: jt")
def where_self(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    condition = new_kwargs.pop("condition")
    inp = new_kwargs.pop("input")
    other = new_kwargs.pop("other")

    assert condition._size == other._size == inp._size

    return NestedTensor(
        func(condition._values, inp._values, other._values, **new_kwargs),
        **extract_kwargs(condition),
    )


@register_jagged_func(torch.ops.aten._pin_memory.default, "self: jt, device: any?")
def _pin_memory_default(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")

    return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))


@register_jagged_func(torch.ops.aten.is_pinned.default, "self: jt, device: any?")
def is_pinned_default(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")

    return func(inp._values, **new_kwargs)


@register_jagged_func(
    torch.ops.aten.is_same_size.default, "self: jt_all, other: jt_all"
)
def is_same_size_default(func, *args, **kwargs):
    return args[0]._size == args[1]._size


@register_jagged_func(
    torch.ops.aten.sum.dim_IntList, "self: jt, dim: any?, keepdim: any?, dtype: any?"
)
def sum_dim_IntList(func, *args, **kwargs):
    # sum_dim_IntList can produce a NT or a T depending on whether the ragged dims
    # are reduced away.
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    inp = new_kwargs.pop("input")
    assert inp._ragged_idx == 1
    new_kwargs["dim"], ragged_reduced_away = _wrap_jagged_dims(
        inp.dim(), new_kwargs["dim"], "sum"
    )

    if not ragged_reduced_away:
        return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))
    else:
        # Don't wrap because we reduced away the raggedness
        out = func(inp._values, **new_kwargs)
        if new_kwargs["keepdim"]:
            out = out.unsqueeze(0)
        return out


@register_jagged_func(
    torch.ops.aten.transpose.int, "self: jt_all, dim0: any, dim1: any"
)
def transpose_int(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    from torch._prims_common import canonicalize_dims

    inp = new_kwargs.pop("input")
    dim0, dim1 = canonicalize_dims(inp.dim(), (new_kwargs["dim0"], new_kwargs["dim1"]))

    if inp._lengths is not None:
        raise ValueError(
            "transpose(): not supported on jagged layout nested tensor with holes"
        )

    # To support the SDPA API, inputs need to have the ragged idx transposed to dim 2
    # instead of 1, although the internal Flash and mem-effn implementations will
    # use the inputs with raggedness in dim 1.
    if dim0 == inp._ragged_idx or dim1 == inp._ragged_idx:
        if dim0 == 0 or dim1 == 0:
            raise ValueError(
                "Transpose is not supported on the batch dimension for jagged NT"
            )
        if dim0 == inp._ragged_idx:
            to_dim = dim1
        else:
            to_dim = dim0
        inp_kwargs = extract_kwargs(inp)
        inp_kwargs["_ragged_idx"] = to_dim
        return NestedTensor(
            inp.values().transpose(
                _outer_to_inner_dim(len(inp._size), dim0),
                _outer_to_inner_dim(len(inp._size), dim1),
            ),
            **inp_kwargs,
        )

    new_kwargs["dim0"] = _wrap_jagged_dim(inp.dim(), new_kwargs["dim0"], "transpose")
    new_kwargs["dim1"] = _wrap_jagged_dim(inp.dim(), new_kwargs["dim1"], "transpose")

    return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))


@register_jagged_func(
    [torch.ops.aten.view.default, torch.ops.aten._unsafe_view.default],
    "self: jt_all, size: any",
)
def view_default(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")
    size = new_kwargs.pop("size")

    if inp._ragged_idx != 1 and tuple(inp._size) != tuple(size):
        raise RuntimeError(
            f"view(): does not support ragged_idx != 1 except when inp._size == size. "
            f"inp._size is ({inp._size}) and size is ({size})."
        )

    # Ensure specified size still includes batch and ragged dims
    if len(size) < 3 or not raggedness_matches(inp, size):
        raise RuntimeError(f"view(): cannot view shape {inp._size} as {size}")

    # outer size: the size of the NT, e.g. [3, j0, 10]
    # inner size: the size of the values, e.g. [8, 10] (e.g. for offsets = [0, 3, 5, 8])
    # this function gets inner_size[inner_idx] for a given inner_idx.
    #
    # example: for outer size [a, b, c, j0, d, e, f]
    # assume that j0 is ragged, the others are concrete integers,
    # and ragged_idx=3
    # inner size will be [b, c, inp._values.size(ragged_idx - 1), d, e, f]
    # therefore:
    # inner_size[0] = outer_size[1]
    # inner_size[1] = outer_size[2]
    # inner_size[2] = inp._values.size(ragged_idx - 1)
    # inner_size[3] = outer_size[4]
    # inner_size[4] = outer_size[5]
    def get_inner_size(inner_idx):
        nonlocal inp, size
        if inner_idx == inp._ragged_idx - 1:
            return inp._values.size(inner_idx)
        else:
            return size[inner_idx + 1]

    inner_size = [get_inner_size(i) for i in range(len(size) - 1)]

    return NestedTensor(func(inp._values, inner_size), **extract_kwargs(inp))
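

# Worked sketch (not part of the original file): the requested outer size is
# translated to a view over the packed values buffer by dropping the batch
# dim and substituting the values' size at the ragged position.
def _example_view():
    nt = torch.nested.nested_tensor(
        [torch.randn(2, 6), torch.randn(3, 6)], layout=torch.jagged
    )  # outer size (2, j0, 6); values buffer size (5, 6)
    out = nt.view(2, -1, 3, 2)  # values buffer is viewed as (5, 3, 2)
    print(out.shape)  # (2, j0, 3, 2)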


@register_jagged_func(
    torch.ops.aten.native_layer_norm.default,
    "input: jt, normalized_shape: any, weight: any?, bias: any?, eps: any",
)
def native_layer_norm_default(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")
    normalized_shape = new_kwargs["normalized_shape"]

    # Ensure we're not trying to normalize over the ragged dim
    if inp.dim() < 3 or (inp.dim() - len(normalized_shape)) < 2:
        raise RuntimeError(
            "layer_norm(): normalizing over ragged dim not supported for nested tensors"
        )

    output, mean, std = func(inp._values, **new_kwargs)
    return (NestedTensor(output, **extract_kwargs(inp)), mean, std)


@register_jagged_func(
    torch.ops.aten.native_layer_norm_backward.default,
    "grad_out: jt, input: jt, normalized_shape: any, mean: any, rstd: any, weight: any?, bias: any?, output_mask: any",
)
def native_layer_norm_backward_default(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )
    grad_out = new_kwargs.pop("grad_out")
    inp = new_kwargs.pop("input")
    d_input, d_gamma, d_beta = func(grad_out._values, inp._values, **new_kwargs)
    if d_input is None:
        return (None, d_gamma, d_beta)

    return (NestedTensor(d_input, **extract_kwargs(inp)), d_gamma, d_beta)


@register_jagged_func(torch.ops.aten.select.int, "self: jt, dim: any, index: any")
def select_int(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")
    new_kwargs["dim"] = _wrap_jagged_dim(inp.dim(), new_kwargs["dim"], "select")

    return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))


@register_jagged_func(
    torch.ops.aten.slice.Tensor,
    "self: jt, dim: any?, start: any?, end: any?, step: any?",
)
def slice_tensor(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")
    new_kwargs["dim"] = _wrap_jagged_dim(inp.dim(), new_kwargs["dim"], "slice")

    return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))


@register_jagged_func(
    torch.ops.aten.convolution.default,
    "input: jt, weight: t, bias: t?, stride: any, padding: any, "
    "dilation: any, transposed: any, output_padding: any, groups: any",
)
def convolution_default(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")

    return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))


@register_jagged_func(
    torch.ops.aten.mean.dim, "self: jt, dim: any?, keepdim: any, dtype: any?"
)
def mean_dim(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")
    # NB: mean expects dim as a single item list of ints for some reason
    new_kwargs["dim"] = [_wrap_jagged_dim(inp.dim(), new_kwargs["dim"][0], "mean")]

    return NestedTensor(func(inp._values, **new_kwargs), **extract_kwargs(inp))


@register_jagged_func(torch.ops.aten.stack.default, "tensors: any, dim: any")
def stack_default(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    # guaranteed this is non-empty if we got here
    tensors = new_kwargs.pop("tensors")
    for t in tensors:
        if not isinstance(t, NestedTensor):
            raise RuntimeError("stack(): expected all nested tensors inputs")

        if t.dim() != tensors[0].dim():
            raise RuntimeError(
                "stack(): expected all nested tensors to have the same dim"
            )

        if not raggedness_matches(t, tensors[0].shape):
            raise RuntimeError(
                "stack(): expected all nested tensors to have the same nested structure"
            )

    new_kwargs["dim"] = _wrap_jagged_dim(
        tensors[0].dim() + 1, new_kwargs["dim"], "stack"
    )

    return NestedTensor(
        func([t._values for t in tensors], **new_kwargs), **extract_kwargs(tensors[0])
    )


@register_jagged_func(
    torch.ops.aten.embedding.default,
    "weight: t, indices: jt, padding_idx: any?, scale_grad_by_freq: any?, sparse: any?",
)
def embedding_default(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    # guaranteed this is non-empty if we got here
    indices = new_kwargs.pop("indices")
    weight = new_kwargs.pop("weight")

    return NestedTensor(
        func(weight, indices._values, **new_kwargs), **extract_kwargs(indices)
    )


@register_jagged_func(
    [
        torch.ops.aten.values.default,
        torch.ops.aten._nested_get_values.default,
    ],
    "self: jt_all",
)
def values_default(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")

    # TODO: Handle inference mode properly.
    # See https://github.com/pytorch/pytorch/issues/112024#issuecomment-1779554292
    return inp._values.detach()


@register_jagged_func(
    torch.ops.aten._nested_view_from_jagged.default,
    "values: t, offsets: t, dummy: jt_all, lengths: t?, ragged_idx: any?",
)
def _nested_view_from_jagged_default(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    values, offsets, lengths = (
        new_kwargs["input"],
        new_kwargs["offsets"],
        new_kwargs["lengths"],
    )
    ragged_idx = new_kwargs["ragged_idx"]

    return NestedTensor(values, offsets, lengths=lengths, _ragged_idx=ragged_idx)


@register_jagged_func(torch.ops.aten._nested_get_offsets.default, "self: jt_all")
def _nested_get_offsets(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")
    return inp._offsets


@register_jagged_func(torch.ops.aten._nested_get_lengths.default, "self: jt_all")
def _nested_get_lengths(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")
    return inp._lengths


@register_jagged_func(torch.ops.aten._nested_get_ragged_idx.default, "self: jt_all")
def _nested_get_ragged_idx(func, *args, **kwargs):
    _, new_kwargs = normalize_function(
        func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
    )

    inp = new_kwargs.pop("input")
    return inp._ragged_idx


# Make the dummy available on the C++ side.
@register_jagged_func(torch.ops.aten._nested_get_jagged_dummy.default, "self: any")
def _nested_get_jagged_dummy(func, *args, **kwargs):
    from torch.nested._internal.nested_tensor import _nt_view_dummy

    return _nt_view_dummy


with torch.library._scoped_library("aten", "IMPL") as aten:
    aten.impl("_nested_get_jagged_dummy", _nested_get_jagged_dummy, "CPU")
    aten.impl("_nested_get_jagged_dummy", _nested_get_jagged_dummy, "CUDA")
    aten.impl("_nested_get_jagged_dummy", _nested_get_jagged_dummy, "Meta")
llmeval-env/lib/python3.10/site-packages/torch/nested/_internal/sdpa.py
ADDED
@@ -0,0 +1,780 @@
import logging
from typing import Optional, Tuple

import torch
import torch.nn
import torch.nn.functional as F
from torch.backends.cuda import (
    can_use_efficient_attention,
    can_use_flash_attention,
    flash_sdp_enabled,
    math_sdp_enabled,
    mem_efficient_sdp_enabled,
    SDPAParams,
)

from torch.nn.attention import SDPBackend
from .nested_tensor import NestedTensor

log = logging.getLogger(__name__)


def _validate_sdpa_input(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attn_mask: Optional[torch.Tensor] = None,
    dropout_p=0.0,
    is_causal=False,
    scale=None,
):
    if (
        not isinstance(query, NestedTensor)
        or not isinstance(key, NestedTensor)
        or not isinstance(value, NestedTensor)
    ):
        raise ValueError(
            f"Expected query, key, and value to be nested tensors, "
            f"but got query.is_nested: {query.is_nested}, key.is_nested: {key.is_nested}, "
            f"and value.is_nested: {value.is_nested} instead."
        )
    if query.dtype != key.dtype or query.dtype != value.dtype:
        raise ValueError(
            f"Expected query, key, and value to have the same dtype, "
            f"but got query.dtype: {query.dtype}, key.dtype: {key.dtype}, "
            f"and value.dtype: {value.dtype} instead."
        )
    if query.device != key.device or query.device != value.device:
        raise ValueError(
            f"Expected query, key, and value to have the same device type, "
            f"but got query.device: {query.device}, key.device: {key.device}, "
            f"and value.device: {value.device} instead."
        )
    if query.dim() < 2 or key.dim() < 2 or value.dim() < 2:
        raise ValueError(
            f"Expected query, key, and value to all be at least 2 dimensional, but got query.dim: "
            f"{query.dim()}, key.dim: {key.dim()} and value.dim: {value.dim()} instead."
        )
    if query._ragged_idx != key._ragged_idx or query._ragged_idx != value._ragged_idx:
        raise ValueError(
            f"Expected query, key, and value to all be ragged on the same dimension, but got ragged "
            f"dims {query._ragged_idx}, {key._ragged_idx}, and {value._ragged_idx}, respectively."
        )
    if attn_mask is not None:
        # TODO: Figure out whether masks are actually supported for this layout or not
        raise ValueError("Masks are not yet supported!")
        if attn_mask.dtype != torch.bool and attn_mask.dtype != query.dtype:
            raise ValueError(
                f"Expected attn_mask dtype to be bool or to match query dtype, but got attn_mask.dtype: "
                f"{attn_mask.dtype}, and query.dtype: {query.dtype} instead."
            )


def _check_batch_size_nested(params: SDPAParams, debug=False) -> bool:
    # This is expected to be called after check_tensor_shapes ensuring that the
    # size() calls won't error since the inputs are all 4 dimensional
    q_batch_size = params.query.size(0)
    k_batch_size = params.key.size(0)
    v_batch_size = params.value.size(0)

    # num_heads logic for nested input is checked in
    # check_for_seq_len_0_nested_tensor as there is handling there to make sure
    # num_heads is not ragged
    return q_batch_size == k_batch_size and q_batch_size == v_batch_size


def _check_head_dim_size_flash_nested(params: SDPAParams, debug=False) -> bool:
    max_size = 256
    query_size_last = params.query.size(-1)
    key_size_last = params.key.size(-1)
    value_size_last = params.value.size(-1)
    same_head_dim_size = (
        query_size_last == key_size_last and query_size_last == value_size_last
    )
    if not (
        same_head_dim_size
        and (query_size_last % 8 == 0)
        and (query_size_last <= max_size)
    ):
        if debug:
            log.warning(
                "For NestedTensor inputs, Flash attention requires q,k,v to have the same "
                "last dimension and to be a multiple of 8 and less than or equal to 256. "
                "Got Query.size(-1): %d, Key.size(-1): %d, Value.size(-1): %d instead.",
                query_size_last,
                key_size_last,
                value_size_last,
            )
        return False
    return True


def _check_for_seq_len_0_and_consistent_head_dim_nested_helper(
    param: torch.Tensor, param_name: str, debug=False
) -> bool:
    assert isinstance(param, NestedTensor), "param should be a jagged NT"

    if param._ragged_idx == 1:
        # num_head_dims is ragged
        if debug:
            log.warning(
                "Fused kernels do not support ragged num_head_dims, %s has a ragged num_heads.",
                param_name,
            )
        return False

    # This is being called inside sdp with shape [batch, heads, {seq_len}, dim]
    if param._min_seqlen == 0:
        if debug:
            log.warning(
                "Fused kernels do not support seq_len == 0, %s has a seq len of 0.",
                param_name,
            )
        return False

    return True


def _try_broadcast_param_size(q_size, k_size, v_size, param_name, debug=False) -> bool:
    max_size = max(q_size, k_size, v_size)
    if (
        (q_size != max_size and q_size != 1)
        or (k_size != max_size and k_size != 1)
        or (v_size != max_size and v_size != 1)
    ):
        if debug:
            log.warning(
                "Both fused kernels require query, key and value to have broadcastable %s, "
                "got Query %s %d, Key %s %d, Value %s %d instead.",
                param_name,
                param_name,
                q_size,
                param_name,
                k_size,
                param_name,
                v_size,
            )
        return False
    return True


def _check_for_seq_len_0_nested(params: SDPAParams, debug=False) -> bool:
    # When this function is called we are assured that the nt is dim==4
    q_is_safe = (
        _check_for_seq_len_0_and_consistent_head_dim_nested_helper(
            params.query, "query", debug
        )
        if params.query.is_nested
        else True
    )
    # short circuit if any is unsafe
    if not q_is_safe:
        return False

    k_is_safe = (
        _check_for_seq_len_0_and_consistent_head_dim_nested_helper(
            params.key, "key", debug
        )
        if params.key.is_nested
        else True
    )
    # short circuit if any is unsafe
    if not k_is_safe:
        return False

    v_is_safe = (
        _check_for_seq_len_0_and_consistent_head_dim_nested_helper(
            params.value, "value", debug
        )
        if params.value.is_nested
        else True
    )
    # short circuit if any is unsafe
    if not v_is_safe:
        return False

    # We now know none of the inputs have ragged num_heads, so we can safely
    # access .size(1)
    q_num_heads = params.query.size(1)
    k_num_heads = params.key.size(1)
    v_num_heads = params.value.size(1)
    same_num_heads = q_num_heads == k_num_heads and q_num_heads == v_num_heads

    if not same_num_heads:
        if (
            params.query.requires_grad
            or params.key.requires_grad
            or params.value.requires_grad
        ):
            if debug:
                log.warning(
                    "Both fused kernels do not support training with broadcasted NT inputs."
                )
            return False
        return _try_broadcast_param_size(
            q_num_heads, k_num_heads, v_num_heads, "num heads", debug
        )
    return True


def _can_use_flash_sdpa_jagged(params: SDPAParams, debug=False) -> bool:
    constraints = (
        _check_batch_size_nested,
        _check_head_dim_size_flash_nested,
        _check_for_seq_len_0_nested,
    )
    for constraint in constraints:
        if not constraint(params, debug):
            return False
    return True


def _can_use_efficient_sdpa_jagged(params: SDPAParams, debug=False) -> bool:
    constraints = (
        _check_batch_size_nested,
        _check_for_seq_len_0_nested,
    )
    for constraint in constraints:
        if not constraint(params, debug):
            return False
    return True


def _can_use_math_sdpa_jagged(params: SDPAParams, debug=False) -> bool:
    if (
        not params.query.transpose(1, 2).is_contiguous()
        or not params.key.transpose(1, 2).is_contiguous()
        or not params.value.transpose(1, 2).is_contiguous()
    ):
        if debug:
            log.warning(
                "If inputs are nested tensors they must be contiguous after transposing."
            )
        return False
    if params.is_causal:
        if debug:
            log.warning(
                "Nested tensors for query / key are not supported when is_causal=True."
            )
        return False
    return True


def _select_sdp_backend(query, key, value, attn_mask, dropout, is_causal):
    if (
        not flash_sdp_enabled()
        and not mem_efficient_sdp_enabled()
        and not math_sdp_enabled()
    ):
        return SDPBackend.ERROR

    ordering = (
        SDPBackend.FLASH_ATTENTION,
        SDPBackend.EFFICIENT_ATTENTION,
        SDPBackend.MATH,
    )

    params = SDPAParams(query, key, value, attn_mask, dropout, is_causal)

    for backend in ordering:
        if backend == SDPBackend.FLASH_ATTENTION:
            if can_use_flash_attention(params) and _can_use_flash_sdpa_jagged(params):
                return SDPBackend.FLASH_ATTENTION
        if backend == SDPBackend.EFFICIENT_ATTENTION:
            if can_use_efficient_attention(params) and _can_use_efficient_sdpa_jagged(
                params
            ):
                return SDPBackend.EFFICIENT_ATTENTION
        if backend == SDPBackend.MATH:
            if math_sdp_enabled() and _can_use_math_sdpa_jagged(params):
                return SDPBackend.MATH

    log.warning("Memory efficient kernel not used because:")
    can_use_efficient_attention(params, debug=True)
    _can_use_efficient_sdpa_jagged(params, debug=True)
    log.warning("Flash attention kernel not used because:")
    can_use_flash_attention(params, debug=True)
    _can_use_flash_sdpa_jagged(params, debug=True)
    log.warning("Math attention kernel not used because:")
    _can_use_math_sdpa_jagged(params, debug=True)
    return SDPBackend.ERROR


def _cumulative_and_max_seq_len_nnz(qkv: torch.Tensor) -> Tuple[torch.Tensor, int, int]:
    # This function is used to calculate two pieces of metadata that are needed
    # for use with flash-attention and efficient_attention kernels. They are the
    # cumulative sequence_length over a batch of sequences and the maximum
    # sequence length.

    # It returns a tuple of the cumulative sequence lengths, the maximum
    # sequence length, and the last element in the cumulative_sequence_lengths
    if not isinstance(qkv, NestedTensor):
        raise ValueError("QKV must be nested for flash cumulative_seq_len calculation.")

    if qkv.lengths() is None:
        # TODO: Explore performance impact of copying
        cumulative_seqlen = qkv.offsets().to(dtype=torch.int32, device=qkv.device)
        max_seqlen = qkv._max_seqlen
        n_elem = qkv.values().shape[0]
    else:
        # TODO: Explore performance impact of copying
        cumulative_seqlen = (
            qkv.lengths().cumsum(0).to(dtype=torch.int32, device=qkv.device)
        )
        batch_size = qkv.size(0)
        max_seqlen = qkv._max_seqlen
        # TODO: Explore performance impact when compiling
        n_elem = int(cumulative_seqlen[-1].item())
    return cumulative_seqlen, max_seqlen, n_elem
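

# Worked sketch (not part of the original file): for a jagged NT with
# offsets [0, 2, 5], the fused kernels receive cu_seqlens = [0, 2, 5],
# max_seqlen = 3, and Nnz = 5 (the number of packed rows). Assumes the NT
# exposes `_max_seqlen` as used above.
def _example_cu_seqlens():
    nt = torch.nested.nested_tensor(
        [torch.randn(2, 8), torch.randn(3, 8)], layout=torch.jagged
    )
    cu_seqlens, max_seqlen, n_elem = _cumulative_and_max_seq_len_nnz(nt)
    print(cu_seqlens.tolist(), max_seqlen, n_elem)  # [0, 2, 5] 3 5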
|
329 |
+
|
330 |
+
|
331 |
+
def _is_safe_to_get_storage_as_tensor(tensor: torch.Tensor):
|
332 |
+
# This function checks if a nested tensor is valid for
|
333 |
+
# use with the flash-attention and efficient_attention kernels without
|
334 |
+
# needing to call contiguous on the nested tensor input.
|
335 |
+
# It checks that the storage offsets' adjacent_differences are a constant
|
336 |
+
# mutiple of the previous tensor in the nested tensor and that the strides
|
337 |
+
# are monitonically decreasing. This check is done after calling transpose on
|
338 |
+
# the nested tensor resulting in a Nt of shape [bsz, {seq_len}, num_heads, dim]
|
339 |
+
|
340 |
+
# Returns a boolean indicating if contiguous needs to be called for input
|
341 |
+
assert isinstance(tensor, NestedTensor)
|
342 |
+
offsets = tensor.offsets()
|
343 |
+
strides = tensor._strides
|
344 |
+
|
345 |
+
n_tensors = offsets.size(0) - 1
|
346 |
+
if n_tensors <= 1:
|
347 |
+
return True
|
348 |
+
|
349 |
+
# Check initially that the tensor strides are in strictly descending order
|
350 |
+
prev_stride = strides[1]
|
351 |
+
for stride in strides[2:]:
|
352 |
+
if prev_stride <= stride:
|
353 |
+
# This would mean that the last stride is greater than the seq_len
|
354 |
+
# stride
|
355 |
+
return False
|
356 |
+
prev_stride = stride
|
357 |
+
|
358 |
+
# Congrats you made it!
|
359 |
+
return True
|
360 |
+
|
361 |
+
|
362 |
+
def _view_as_dense(
    tensor: torch.Tensor, Nnz: int, num_heads: int, head_dim: int
) -> torch.Tensor:
    if tensor.is_nested:
        return tensor.values()
    return tensor.view(Nnz, num_heads, head_dim)


# TODO: Next iteration should add test cases and check it works
# def _sdpa_nested_preprocessing_with_broadcast(query, key, value):
#     # Query (Batch x Num_heads x {Q_seq_len} x Dim_per_head)
#     # Key (Batch x Num_heads x {KV_seq_len} x Dim_per_head)
#     # Value (Batch x Num_heads x {KV_seq_len} x Dim_per_head)
#     q_batch_size = query.size(0)
#     k_batch_size = key.size(0)
#     v_batch_size = value.size(0)

#     output_batch_size = max(q_batch_size, k_batch_size, v_batch_size)

#     q_num_heads = query.size(1)
#     k_num_heads = key.size(1)
#     v_num_heads = value.size(1)

#     output_num_heads = max(q_num_heads, k_num_heads, v_num_heads)

#     head_dim_qk = query.size(3)
#     head_dim_v = value.size(3)

#     q_t = query.transpose(1, 2)
#     k_t = key.transpose(1, 2)
#     v_t = value.transpose(1, 2)

#     # Checks in sdp_utils ensure that if {*}_batch_size/{*}_num_heads !=
#     # output_batch_size/num_heads then they are 1
#     q_batch_size_needs_broadcast = q_batch_size != output_batch_size
#     k_batch_size_needs_broadcast = k_batch_size != output_batch_size
#     v_batch_size_needs_broadcast = v_batch_size != output_batch_size

#     # If {*}_batch_size_needs_broadcast, then
#     # (1) max_seqlen_batch_{*} is given by {*}_t.size(1)
#     #     this is because needs_broadcast indicates that the batch_size is 1
#     #     and hence there is only 1 value for seq_len
#     # (2) The cum_seq_lens are given by [0, {*}_t.size(1), 2 * {*}_t.size(1),
#     #     ..., output_batch_size * {*}_t.size(1)]
#     # (3) Nnz_{*} is given by output_batch_size * {*}_t.size(1)

#     if q_batch_size_needs_broadcast or not q_t.is_nested:
#         max_seqlen_batch_q = q_t.size(1)
#         cumulative_sequence_length_q = torch.arange(
#             0,
#             (output_batch_size + 1) * max_seqlen_batch_q,
#             max_seqlen_batch_q,
#             device=q_t.device,
#             dtype=torch.int32,
#         )
#         Nnz_q = output_batch_size * max_seqlen_batch_q
#     else:
#         (
#             cumulative_sequence_length_q,
#             max_seqlen_batch_q,
#             Nnz_q,
#         ) = _cumulative_and_max_seq_len_nnz(q_t)

#     if k_batch_size_needs_broadcast and v_batch_size_needs_broadcast:
#         assert k_t.size(1) == v_t.size(1)
#         max_seqlen_batch_kv = k_t.size(1)
#         cumulative_sequence_length_kv = torch.arange(
#             0,
#             (output_batch_size + 1) * max_seqlen_batch_kv,
#             max_seqlen_batch_kv,
#             device=k_t.device,
#             dtype=torch.int32,
#         )
#         Nnz_kv = output_batch_size * max_seqlen_batch_kv
#     else:
#         cumulative_sequence_length_kv, max_seqlen_batch_kv, Nnz_kv = (
#             _cumulative_and_max_seq_len_nnz(v_t)
#             if k_batch_size_needs_broadcast
#             else _cumulative_and_max_seq_len_nnz(k_t)
#         )

#     q_num_heads_needs_broadcast = q_num_heads != output_num_heads
#     k_num_heads_needs_broadcast = k_num_heads != output_num_heads
#     v_num_heads_needs_broadcast = v_num_heads != output_num_heads

#     if not q_t.is_nested:
#         query_buffer_reshaped = q_t.expand(
#             output_batch_size, q_t.size(1), output_num_heads, head_dim_qk
#         )
#         query_buffer_reshaped = query_buffer_reshaped.reshape(
#             Nnz_q, output_num_heads, head_dim_qk
#         )
#     else:
#         if not q_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(q_t):
#             q_t = q_t.contiguous()
#         # If we are broadcasting then Nnz_q will be the output_batch_size since
#         # seq_len is 1
#         effective_batch_size_q = (
#             output_batch_size if q_batch_size_needs_broadcast else Nnz_q
#         )
#         query_buffer_reshaped = _view_as_dense(
#             q_t, effective_batch_size_q, output_num_heads, head_dim_qk
#         )

#     # If the physical layout of the NestedTensor's storage
#     # is not: batch, {seq_len}, num_heads, head_dim then we need
#     # to call contiguous
#     if not k_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(k_t):
#         k_t = k_t.contiguous()
#     if not v_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(v_t):
#         v_t = v_t.contiguous()

#     effective_batch_size_k = (
#         output_batch_size if k_batch_size_needs_broadcast else Nnz_kv
#     )
#     key_buffer_reshaped = _view_as_dense(
#         k_t, effective_batch_size_k, output_num_heads, head_dim_qk
#     )

#     effective_batch_size_v = (
#         output_batch_size if v_batch_size_needs_broadcast else Nnz_kv
#     )
#     value_buffer_reshaped = _view_as_dense(
#         v_t, effective_batch_size_v, output_num_heads, head_dim_v
#     )

#     if not q_batch_size_needs_broadcast:
#         output_shape = q_t._size
#         if head_dim_v != head_dim_qk:
#             output_shape[-1] = head_dim_v
#         if q_num_heads_needs_broadcast:
#             output_shape[1] = output_num_heads
#     else:
#         output_shape = torch.empty(3, dtype=torch.int64, device=torch.device("cpu"))
#         output_shape[0] = q_t.size(1)
#         output_shape[1] = output_num_heads
#         output_shape[2] = head_dim_v

#     return (
#         query_buffer_reshaped,
#         key_buffer_reshaped,
#         value_buffer_reshaped,
#         cumulative_sequence_length_q,
#         cumulative_sequence_length_kv,
#         max_seqlen_batch_q,
#         max_seqlen_batch_kv,
#         output_shape,
#     )


def _sdpa_nested_preprocessing(query, key, value):
    # Query (Batch x Num_heads x {Q_seq_len} x Dim_per_head)
    # Key (Batch x Num_heads x {KV_seq_len} x Dim_per_head)
    # Value (Batch x Num_heads x {KV_seq_len} x Dim_per_head)
    q_batch_size = query.size(0)
    k_batch_size = key.size(0)
    v_batch_size = value.size(0)

    q_num_heads = query.size(1)
    k_num_heads = key.size(1)
    v_num_heads = value.size(1)

    if not (q_batch_size == k_batch_size and q_batch_size == v_batch_size) or not (
        q_num_heads == k_num_heads and k_num_heads == v_num_heads
    ):
        raise RuntimeError(
            "This path is currently not implemented for jagged layout NT."
        )
        # return _sdpa_nested_preprocessing_with_broadcast(query, key, value)

    num_heads = query.size(1)
    head_dim_qk = query.size(3)
    head_dim_v = value.size(3)
    q_t = query.transpose(1, 2)
    k_t = key.transpose(1, 2)
    v_t = value.transpose(1, 2)

    (
        cumulative_sequence_length_q,
        max_seqlen_batch_q,
        Nnz_q,
    ) = _cumulative_and_max_seq_len_nnz(q_t)
    (
        cumulative_sequence_length_kv,
        max_seqlen_batch_kv,
        Nnz_kv,
    ) = _cumulative_and_max_seq_len_nnz(k_t)

    # [TODO] K and V have to have the same Nnz, should probably torch_check
    # assume in order to not iterate over v

    # If the physical layout of the NestedTensor's storage
    # is not: batch, {seq_len}, num_heads, head_dim then we need
    # to call contiguous
    if not q_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(q_t):
        q_t = q_t.contiguous()
    if not k_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(k_t):
        k_t = k_t.contiguous()
    if not v_t.is_contiguous() and not _is_safe_to_get_storage_as_tensor(v_t):
        v_t = v_t.contiguous()

    query_buffer_reshaped = _view_as_dense(q_t, Nnz_q, num_heads, head_dim_qk)
    key_buffer_reshaped = _view_as_dense(k_t, Nnz_kv, num_heads, head_dim_qk)
    value_buffer_reshaped = _view_as_dense(v_t, Nnz_kv, num_heads, head_dim_v)

    output_nt_info = {
        "offsets": q_t.offsets(),
        "_max_seqlen": q_t._max_seqlen,
        "_min_seqlen": q_t._min_seqlen,
    }

    return (
        query_buffer_reshaped,
        key_buffer_reshaped,
        value_buffer_reshaped,
        cumulative_sequence_length_q,
        cumulative_sequence_length_kv,
        max_seqlen_batch_q,
        max_seqlen_batch_kv,
        output_nt_info,
    )

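Conceptually, this preprocessing flattens the jagged [batch, {seq_len}, heads, dim] layout into one packed [Nnz, heads, dim] buffer plus boundary offsets for the varlen kernels. A rough standalone sketch with hypothetical sizes (editorial illustration, not part of the file):

import torch

H, D = 4, 16
lengths = [3, 5]                   # ragged sequence lengths per batch element
chunks = [torch.randn(L, H, D) for L in lengths]

packed = torch.cat(chunks, dim=0)  # [Nnz, H, D] with Nnz = 3 + 5 = 8
cu_seqlens = torch.tensor([0, 3, 8], dtype=torch.int32)  # offsets-style boundaries

# Row i of sequence b lives at packed[cu_seqlens[b] + i].
assert packed.shape == (sum(lengths), H, D)
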
def _pad_last_dim(
    tensor: torch.Tensor, alignment_size: int, slice: bool
) -> torch.Tensor:
    # FlashAttentionV2 requires that the head dimension be a multiple of 8.
    # This was previously done within the kernel; however, that causes the
    # kernel to maybe alias query, key, value. So instead we pad the head
    # dimension to be a multiple of 8 in the composite region.
    last_dim_size = tensor.size(-1)
    if last_dim_size % alignment_size == 0:
        return tensor
    pad_count = alignment_size - (last_dim_size % alignment_size)
    tensor = torch.nn.functional.pad(tensor, [0, pad_count])
    if slice:
        return tensor[..., 0:last_dim_size]
    return tensor

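For example, a head dimension of 5 gets padded up to the next multiple of 8, and slicing recovers the original columns; a standalone sketch using plain torch ops (editorial illustration):

import torch
import torch.nn.functional as F

x = torch.randn(2, 4, 10, 5)             # head_dim = 5, not a multiple of 8
pad_count = 8 - (x.size(-1) % 8)          # 3
padded = F.pad(x, [0, pad_count])         # last dim becomes 8, zero-filled
assert padded.shape[-1] == 8
assert torch.equal(padded[..., 0:5], x)   # slicing recovers the original values
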
# TODO: coalesce with torch/nn/utils/attention.py
def _calculate_scale(query, scale):
    # TODO: Investigate why math.sqrt() isn't properly handled by Dynamo?
    softmax_scale = scale if scale is not None else torch.sym_sqrt(1.0 / query.size(-1))
    return softmax_scale

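Note that sqrt(1.0 / d) is just the usual 1/sqrt(d) softmax scaling, written so it stays symbolic under Dynamo; e.g. for head_dim = 64 both forms give 0.125:

import math

head_dim = 64
assert math.sqrt(1.0 / head_dim) == 1.0 / math.sqrt(head_dim) == 0.125
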
def _post_process_flash_output(out: torch.Tensor, og_size):
    if not out.is_nested and out.size(-1) != og_size:
        out = out[..., 0:og_size]
    return out


def jagged_scaled_dot_product_attention(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attn_mask: Optional[torch.Tensor] = None,
    dropout_p=0.0,
    is_causal=False,
    scale=None,
):
    _validate_sdpa_input(query, key, value, attn_mask, dropout_p, is_causal, scale)
    # for mypy, ugh
    assert (
        isinstance(query, NestedTensor)
        and isinstance(key, NestedTensor)
        and isinstance(value, NestedTensor)
    )

    # Special path for non-ragged sequence length (e.g. for SAM where we have a ragged
    # second batch dim instead). For this case, we can just send the dense buffers through
    # vanilla SDPA.
    if query.dim() > 3 and key.dim() > 3 and value.dim() > 3 and query._ragged_idx == 1:
        from torch.nested._internal.ops import extract_kwargs

        output = F.scaled_dot_product_attention(
            query._values,
            key._values,
            value._values,
            attn_mask=(
                attn_mask._values if isinstance(attn_mask, NestedTensor) else attn_mask
            ),
            dropout_p=dropout_p,
            is_causal=is_causal,
            scale=scale,
        )

        return NestedTensor(output, **extract_kwargs(query))

    compute_logsumexp = query.requires_grad or key.requires_grad or value.requires_grad

    backend_choice = _select_sdp_backend(
        query, key, value, attn_mask, dropout_p, is_causal
    )

    if backend_choice == SDPBackend.FLASH_ATTENTION:
        og_size = query.size(-1)
        query_padded = _pad_last_dim(query, 8, False)
        key_padded = _pad_last_dim(key, 8, False)
        value_padded = _pad_last_dim(value, 8, False)
        # We need to calculate the scale based off the OG head dim size
        og_scale = _calculate_scale(query, scale)
        (
            query_buffer_reshaped,
            key_buffer_reshaped,
            value_buffer_reshaped,
            cumulative_sequence_length_q,
            cumulative_sequence_length_kv,
            max_seqlen_batch_q,
            max_seqlen_batch_kv,
            output_nt_info,
        ) = _sdpa_nested_preprocessing(query_padded, key_padded, value_padded)

        (
            attention,
            logsumexp,
            philox_seed,
            philox_offset,
            debug_attn_mask,
        ) = torch.ops.aten._flash_attention_forward(
            query_buffer_reshaped,
            key_buffer_reshaped,
            value_buffer_reshaped,
            cumulative_sequence_length_q,
            cumulative_sequence_length_kv,
            max_seqlen_batch_q,
            max_seqlen_batch_kv,
            dropout_p,
            is_causal,
            False,
            scale=og_scale,
        )
        # Reshape output to convert nnz to batch_size and seq_len
        from torch.nested._internal.nested_tensor import nested_view_from_values_offsets

        attention = nested_view_from_values_offsets(
            attention.squeeze(0), output_nt_info["offsets"]
        ).transpose(1, 2)
        return _post_process_flash_output(attention, og_size)
    elif backend_choice == SDPBackend.EFFICIENT_ATTENTION:
        (
            query_reshaped,
            key_reshaped,
            value_reshaped,
            cumulative_sequence_length_q,
            cumulative_sequence_length_kv,
            max_seqlen_batch_q,
            max_seqlen_batch_kv,
            output_nt_info,
        ) = _sdpa_nested_preprocessing(query, key, value)
        (
            attention,
            log_sumexp,
            seed,
            offset,
            max_seqlen_q,
            max_seqlen_batch_kv,
        ) = torch.ops.aten._efficient_attention_forward(
            query_reshaped.unsqueeze(0),
            key_reshaped.unsqueeze(0),
            value_reshaped.unsqueeze(0),
            None,
            cumulative_sequence_length_q,
            cumulative_sequence_length_kv,
            max_seqlen_batch_q,
            max_seqlen_batch_kv,
            dropout_p,
            int(is_causal),
            compute_logsumexp,
            scale=scale,
        )

        # Reshape output to convert nnz to batch_size and seq_len
        from torch.nested._internal.nested_tensor import nested_view_from_values_offsets

        return nested_view_from_values_offsets(
            attention.squeeze(0), output_nt_info["offsets"]
        ).transpose(1, 2)
    elif backend_choice == SDPBackend.MATH:
        # save the offsets and shape of the inputs, so we can reshape the final output
        # query @ key = attn: [B, D1, j0, D'] @ [B, D1, D', j1] = [B, D1, j0, j1]
        # attn @ value = out: [B, D1, j0, j1] @ [B, D1, j1, D2] = [B, D1, j0, D2]
        offsets = query.offsets()
        d1 = query._size[1]
        d2 = value._size[-1]

        # convert jagged layout Nested Tensor to strided layout Nested Tensor,
        # which supports the math implementation of SDPA
        def get_strided_layout_nested_tensor(jagged_layout_nt):
            lengths = jagged_layout_nt._offsets[1:] - jagged_layout_nt._offsets[:-1]
            transpose = torch.transpose(jagged_layout_nt, 1, 2)
            tensor_list = transpose.values().split(list(lengths), dim=0)
            strided_nt = torch.nested.as_nested_tensor(list(tensor_list))
            strided_nt = strided_nt.transpose(1, 2).contiguous()
            return strided_nt

        query = get_strided_layout_nested_tensor(query)
        key = get_strided_layout_nested_tensor(key)
        value = get_strided_layout_nested_tensor(value)

        attn_out = torch._scaled_dot_product_attention_math(
            query, key, value, attn_mask, dropout_p, is_causal, scale=scale
        )[0]

        from torch.nested._internal.nested_tensor import nested_view_from_values_offsets

        # convert strided layout Nested Tensor back to jagged layout Nested Tensor
        attn_out = attn_out.transpose(1, 2).contiguous().values()
        attn_out = attn_out.view(-1, d1, d2)
        attn_out = nested_view_from_values_offsets(attn_out, offsets)
        attn_out = attn_out.transpose(1, 2)

        return attn_out
    else:
        raise RuntimeError(
            "No viable backend for scaled_dot_product_attention was found."
        )
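As a usage sketch for the dispatcher above: the public SDPA entry point routes jagged-layout nested tensors to this implementation. An editorial illustration, assuming a recent PyTorch where unflatten/transpose are supported on jagged-layout nested tensors:

import torch
import torch.nn.functional as F

n_heads, head_dim = 4, 8
# Jagged batch: two sequences of lengths 3 and 5, embedding dim 32.
x = torch.nested.nested_tensor(
    [torch.randn(3, 32), torch.randn(5, 32)], layout=torch.jagged
)
# [B, {L}, 32] -> [B, {L}, H, D] -> [B, H, {L}, D], as SDPA expects.
q = x.unflatten(-1, [n_heads, head_dim]).transpose(1, 2)
out = F.scaled_dot_product_attention(q, q, q)
print(out.is_nested)  # True; same jagged structure as the inputs
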
llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/__init__.py
ADDED
@@ -0,0 +1,31 @@
from .linear_relu import LinearReLU
from .linear_fused import LinearBn1d
from .conv_fused import (
    ConvBn1d,
    ConvBn2d,
    ConvBn3d,
    ConvBnReLU1d,
    ConvBnReLU2d,
    ConvBnReLU3d,
    ConvReLU1d,
    ConvReLU2d,
    ConvReLU3d,
    update_bn_stats,
    freeze_bn_stats,
)

__all__ = [
    "LinearReLU",
    "LinearBn1d",
    "ConvReLU1d",
    "ConvReLU2d",
    "ConvReLU3d",
    "ConvBn1d",
    "ConvBn2d",
    "ConvBn3d",
    "ConvBnReLU1d",
    "ConvBnReLU2d",
    "ConvBnReLU3d",
    "update_bn_stats",
    "freeze_bn_stats",
]
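These intrinsic QAT modules are normally reached by fusing float modules before quantization-aware training; a minimal sketch of the eager-mode workflow (editorial illustration, not part of the diff):

import torch
import torch.ao.quantization as tq

model = torch.nn.Sequential(torch.nn.Linear(16, 16), torch.nn.ReLU())
model.train()
# Fuse Linear + ReLU into a single intrinsic module.
fused = tq.fuse_modules_qat(model, [["0", "1"]])
fused.qconfig = tq.get_default_qat_qconfig("fbgemm")
prepared = tq.prepare_qat(fused)  # swaps in the QAT LinearReLU with fake-quant
print(type(prepared[0]).__name__)  # LinearReLU
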
llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/qat/modules/linear_relu.py
ADDED
@@ -0,0 +1,15 @@
# flake8: noqa: F401
r"""Intrinsic QAT Modules.

This file is in the process of migration to `torch/ao/nn/intrinsic/qat`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please add it to the
appropriate file under `torch/ao/nn/intrinsic/qat/modules`,
while adding an import statement here.
"""

__all__ = [
    'LinearReLU',
]

from torch.ao.nn.intrinsic.qat import LinearReLU
llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (374 Bytes)

llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/__init__.py
ADDED
@@ -0,0 +1 @@
from .modules import *  # noqa: F403

llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (233 Bytes)
llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/__init__.py
ADDED
@@ -0,0 +1,5 @@
from .linear_relu import LinearReLU

__all__ = [
    'LinearReLU',
]

llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/dynamic/modules/linear_relu.py
ADDED
@@ -0,0 +1,5 @@
from torch.ao.nn.intrinsic.quantized.dynamic import LinearReLU

__all__ = [
    'LinearReLU',
]
llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (451 Bytes)

llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/bn_relu.cpython-310.pyc
ADDED
Binary file (336 Bytes)

llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/conv_relu.cpython-310.pyc
ADDED
Binary file (378 Bytes)

llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/__pycache__/linear_relu.cpython-310.pyc
ADDED
Binary file (302 Bytes)

llmeval-env/lib/python3.10/site-packages/torch/nn/intrinsic/quantized/modules/conv_relu.py
ADDED
@@ -0,0 +1,9 @@
from torch.ao.nn.intrinsic.quantized import ConvReLU1d
from torch.ao.nn.intrinsic.quantized import ConvReLU2d
from torch.ao.nn.intrinsic.quantized import ConvReLU3d

__all__ = [
    'ConvReLU1d',
    'ConvReLU2d',
    'ConvReLU3d',
]
llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__init__.py
ADDED
@@ -0,0 +1,68 @@
from .module import Module
from .linear import Identity, Linear, Bilinear, LazyLinear
from .conv import Conv1d, Conv2d, Conv3d, \
    ConvTranspose1d, ConvTranspose2d, ConvTranspose3d, \
    LazyConv1d, LazyConv2d, LazyConv3d, LazyConvTranspose1d, LazyConvTranspose2d, LazyConvTranspose3d
from .activation import Threshold, ReLU, Hardtanh, ReLU6, Sigmoid, Tanh, \
    Softmax, Softmax2d, LogSoftmax, ELU, SELU, CELU, GELU, Hardshrink, LeakyReLU, LogSigmoid, \
    Softplus, Softshrink, MultiheadAttention, PReLU, Softsign, Softmin, Tanhshrink, RReLU, GLU, \
    Hardsigmoid, Hardswish, SiLU, Mish
from .loss import L1Loss, NLLLoss, KLDivLoss, MSELoss, BCELoss, BCEWithLogitsLoss, NLLLoss2d, \
    CosineEmbeddingLoss, CTCLoss, HingeEmbeddingLoss, MarginRankingLoss, \
    MultiLabelMarginLoss, MultiLabelSoftMarginLoss, MultiMarginLoss, SmoothL1Loss, HuberLoss, \
    SoftMarginLoss, CrossEntropyLoss, TripletMarginLoss, TripletMarginWithDistanceLoss, PoissonNLLLoss, GaussianNLLLoss
from .container import Container, Sequential, ModuleList, ModuleDict, ParameterList, ParameterDict
from .pooling import AvgPool1d, AvgPool2d, AvgPool3d, MaxPool1d, MaxPool2d, MaxPool3d, \
    MaxUnpool1d, MaxUnpool2d, MaxUnpool3d, FractionalMaxPool2d, FractionalMaxPool3d, LPPool1d, LPPool2d, LPPool3d, \
    AdaptiveMaxPool1d, AdaptiveMaxPool2d, AdaptiveMaxPool3d, AdaptiveAvgPool1d, AdaptiveAvgPool2d, AdaptiveAvgPool3d
from .batchnorm import BatchNorm1d, BatchNorm2d, BatchNorm3d, SyncBatchNorm, \
    LazyBatchNorm1d, LazyBatchNorm2d, LazyBatchNorm3d
from .instancenorm import InstanceNorm1d, InstanceNorm2d, InstanceNorm3d, \
    LazyInstanceNorm1d, LazyInstanceNorm2d, LazyInstanceNorm3d
from .normalization import LocalResponseNorm, CrossMapLRN2d, LayerNorm, GroupNorm
from .dropout import Dropout, Dropout1d, Dropout2d, Dropout3d, AlphaDropout, FeatureAlphaDropout
from .padding import ReflectionPad1d, ReflectionPad2d, ReflectionPad3d, ReplicationPad1d, ReplicationPad2d, \
    ReplicationPad3d, ZeroPad1d, ZeroPad2d, ZeroPad3d, ConstantPad1d, ConstantPad2d, ConstantPad3d, \
    CircularPad1d, CircularPad2d, CircularPad3d
from .sparse import Embedding, EmbeddingBag
from .rnn import RNNBase, RNN, LSTM, GRU, \
    RNNCellBase, RNNCell, LSTMCell, GRUCell
from .pixelshuffle import PixelShuffle, PixelUnshuffle
from .upsampling import UpsamplingNearest2d, UpsamplingBilinear2d, Upsample
from .distance import PairwiseDistance, CosineSimilarity
from .fold import Fold, Unfold
from .adaptive import AdaptiveLogSoftmaxWithLoss
from .transformer import TransformerEncoder, TransformerDecoder, \
    TransformerEncoderLayer, TransformerDecoderLayer, Transformer
from .flatten import Flatten, Unflatten
from .channelshuffle import ChannelShuffle

__all__ = [
    'Module', 'Identity', 'Linear', 'Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d',
    'ConvTranspose2d', 'ConvTranspose3d', 'Threshold', 'ReLU', 'Hardtanh', 'ReLU6',
    'Sigmoid', 'Tanh', 'Softmax', 'Softmax2d', 'LogSoftmax', 'ELU', 'SELU', 'CELU', 'GLU', 'GELU', 'Hardshrink',
    'LeakyReLU', 'LogSigmoid', 'Softplus', 'Softshrink', 'MultiheadAttention', 'PReLU', 'Softsign', 'Softmin',
    'Tanhshrink', 'RReLU', 'L1Loss', 'NLLLoss', 'KLDivLoss', 'MSELoss', 'BCELoss', 'BCEWithLogitsLoss',
    'NLLLoss2d', 'PoissonNLLLoss', 'CosineEmbeddingLoss', 'CTCLoss', 'HingeEmbeddingLoss', 'MarginRankingLoss',
    'MultiLabelMarginLoss', 'MultiLabelSoftMarginLoss', 'MultiMarginLoss', 'SmoothL1Loss', 'GaussianNLLLoss',
    'HuberLoss', 'SoftMarginLoss', 'CrossEntropyLoss', 'Container', 'Sequential', 'ModuleList', 'ModuleDict',
    'ParameterList', 'ParameterDict', 'AvgPool1d', 'AvgPool2d', 'AvgPool3d', 'MaxPool1d', 'MaxPool2d',
    'MaxPool3d', 'MaxUnpool1d', 'MaxUnpool2d', 'MaxUnpool3d', 'FractionalMaxPool2d', 'FractionalMaxPool3d',
    'LPPool1d', 'LPPool2d', 'LPPool3d', 'LocalResponseNorm', 'BatchNorm1d', 'BatchNorm2d', 'BatchNorm3d',
    'InstanceNorm1d', 'InstanceNorm2d', 'InstanceNorm3d', 'LayerNorm', 'GroupNorm', 'SyncBatchNorm',
    'Dropout', 'Dropout1d', 'Dropout2d', 'Dropout3d', 'AlphaDropout', 'FeatureAlphaDropout',
    'ReflectionPad1d', 'ReflectionPad2d', 'ReflectionPad3d', 'ReplicationPad2d', 'ReplicationPad1d', 'ReplicationPad3d',
    'CrossMapLRN2d', 'Embedding', 'EmbeddingBag', 'RNNBase', 'RNN', 'LSTM', 'GRU', 'RNNCellBase', 'RNNCell',
    'LSTMCell', 'GRUCell', 'PixelShuffle', 'PixelUnshuffle', 'Upsample', 'UpsamplingNearest2d', 'UpsamplingBilinear2d',
    'PairwiseDistance', 'AdaptiveMaxPool1d', 'AdaptiveMaxPool2d', 'AdaptiveMaxPool3d', 'AdaptiveAvgPool1d',
    'AdaptiveAvgPool2d', 'AdaptiveAvgPool3d', 'TripletMarginLoss', 'ZeroPad1d', 'ZeroPad2d', 'ZeroPad3d',
    'ConstantPad1d', 'ConstantPad2d', 'ConstantPad3d', 'Bilinear', 'CosineSimilarity', 'Unfold', 'Fold',
    'AdaptiveLogSoftmaxWithLoss', 'TransformerEncoder', 'TransformerDecoder',
    'TransformerEncoderLayer', 'TransformerDecoderLayer', 'Transformer',
    'LazyLinear', 'LazyConv1d', 'LazyConv2d', 'LazyConv3d',
    'LazyConvTranspose1d', 'LazyConvTranspose2d', 'LazyConvTranspose3d',
    'LazyBatchNorm1d', 'LazyBatchNorm2d', 'LazyBatchNorm3d',
    'LazyInstanceNorm1d', 'LazyInstanceNorm2d', 'LazyInstanceNorm3d',
    'Flatten', 'Unflatten', 'Hardsigmoid', 'Hardswish', 'SiLU', 'Mish', 'TripletMarginWithDistanceLoss', 'ChannelShuffle',
    'CircularPad1d', 'CircularPad2d', 'CircularPad3d'
]
llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (5.11 kB)

llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/_functions.cpython-310.pyc
ADDED
Binary file (6 kB)

llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/activation.cpython-310.pyc
ADDED
Binary file (54.5 kB)

llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/adaptive.cpython-310.pyc
ADDED
Binary file (10.5 kB)

llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/batchnorm.cpython-310.pyc
ADDED
Binary file (32 kB)

llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/channelshuffle.cpython-310.pyc
ADDED
Binary file (2.09 kB)

llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/container.cpython-310.pyc
ADDED
Binary file (34.3 kB)

llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/conv.cpython-310.pyc
ADDED
Binary file (58.8 kB)

llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/distance.cpython-310.pyc
ADDED
Binary file (4.08 kB)

llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/dropout.cpython-310.pyc
ADDED
Binary file (12.3 kB)

llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/flatten.cpython-310.pyc
ADDED
Binary file (5.9 kB)

llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/fold.cpython-310.pyc
ADDED
Binary file (13.1 kB)

llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/instancenorm.cpython-310.pyc
ADDED
Binary file (20.5 kB)

llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/lazy.cpython-310.pyc
ADDED
Binary file (12 kB)

llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/linear.cpython-310.pyc
ADDED
Binary file (10.5 kB)

llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/loss.cpython-310.pyc
ADDED
Binary file (93.2 kB)

llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/module.cpython-310.pyc
ADDED
Binary file (89.7 kB)

llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/normalization.cpython-310.pyc
ADDED
Binary file (11.7 kB)

llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/padding.cpython-310.pyc
ADDED
Binary file (33.5 kB)

llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/pixelshuffle.cpython-310.pyc
ADDED
Binary file (4.34 kB)

llmeval-env/lib/python3.10/site-packages/torch/nn/modules/__pycache__/pooling.cpython-310.pyc
ADDED
Binary file (57 kB)