applied-ai-018 committed
Commit 50d2a30 · verified · 1 Parent(s): 2fef4a6

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes; see the raw diff for the complete change set.

Files changed (50)
  1. ckpts/universal/global_step80/zero/11.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step80/zero/11.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step80/zero/11.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  4. ckpts/universal/global_step80/zero/20.mlp.dense_4h_to_h.weight/exp_avg.pt +3 -0
  5. ckpts/universal/global_step80/zero/20.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  6. ckpts/universal/global_step80/zero/20.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  7. ckpts/universal/global_step80/zero/20.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  8. ckpts/universal/global_step80/zero/20.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  9. ckpts/universal/global_step80/zero/22.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt +3 -0
  10. ckpts/universal/global_step80/zero/22.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  11. ckpts/universal/global_step80/zero/22.mlp.dense_h_to_4h_swiglu.weight/fp32.pt +3 -0
  12. ckpts/universal/global_step80/zero/24.input_layernorm.weight/exp_avg.pt +3 -0
  13. ckpts/universal/global_step80/zero/24.input_layernorm.weight/exp_avg_sq.pt +3 -0
  14. ckpts/universal/global_step80/zero/24.input_layernorm.weight/fp32.pt +3 -0
  15. ckpts/universal/global_step80/zero/25.attention.dense.weight/exp_avg.pt +3 -0
  16. ckpts/universal/global_step80/zero/25.attention.dense.weight/exp_avg_sq.pt +3 -0
  17. ckpts/universal/global_step80/zero/25.attention.dense.weight/fp32.pt +3 -0
  18. ckpts/universal/global_step80/zero/26.post_attention_layernorm.weight/exp_avg.pt +3 -0
  19. ckpts/universal/global_step80/zero/26.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
  20. ckpts/universal/global_step80/zero/26.post_attention_layernorm.weight/fp32.pt +3 -0
  21. ckpts/universal/global_step80/zero/5.input_layernorm.weight/exp_avg.pt +3 -0
  22. ckpts/universal/global_step80/zero/5.input_layernorm.weight/exp_avg_sq.pt +3 -0
  23. ckpts/universal/global_step80/zero/5.input_layernorm.weight/fp32.pt +3 -0
  24. ckpts/universal/global_step80/zero/7.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt +3 -0
  25. venv/lib/python3.10/site-packages/ninja/__init__.py +55 -0
  26. venv/lib/python3.10/site-packages/ninja/__main__.py +5 -0
  27. venv/lib/python3.10/site-packages/ninja/__pycache__/__init__.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/ninja/__pycache__/__main__.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/ninja/__pycache__/_version.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/ninja/__pycache__/ninja_syntax.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/ninja/_version.py +16 -0
  32. venv/lib/python3.10/site-packages/ninja/data/bin/ninja +0 -0
  33. venv/lib/python3.10/site-packages/ninja/ninja_syntax.py +199 -0
  34. venv/lib/python3.10/site-packages/ninja/py.typed +0 -0
  35. venv/lib/python3.10/site-packages/nvidia/cuda_nvrtc/__init__.py +0 -0
  36. venv/lib/python3.10/site-packages/nvidia/cuda_nvrtc/__pycache__/__init__.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/nvidia/cuda_nvrtc/include/__init__.py +0 -0
  38. venv/lib/python3.10/site-packages/nvidia/cuda_nvrtc/include/__pycache__/__init__.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/nvidia/cuda_nvrtc/include/nvrtc.h +845 -0
  40. venv/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/__init__.py +0 -0
  41. venv/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/__pycache__/__init__.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/__pycache__/__init__.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/builtin_types.h +64 -0
  44. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/common_functions.h +65 -0
  45. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/async.h +452 -0
  46. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_reduce.h +108 -0
  47. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_scan.h +174 -0
  48. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/driver_abi.h +99 -0
  49. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/functional.h +212 -0
  50. venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/helpers.h +634 -0
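The checkpoint shards and virtualenv files listed above were pushed with the upload-large-folder tool named in the commit message. A minimal sketch of that workflow through the huggingface_hub Python API (the repository id and local folder are placeholders, not values from this commit):

# Sketch only: resumable bulk upload of a large checkpoint folder to the Hub.
# Assumes huggingface_hub is installed and the user is already authenticated;
# repo_id and folder_path are placeholders, not taken from this commit.
from huggingface_hub import HfApi

api = HfApi()
api.upload_large_folder(
    repo_id="applied-ai-018/example-repo",      # placeholder
    repo_type="model",
    folder_path="./global_step80_checkpoint",   # placeholder
)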
ckpts/universal/global_step80/zero/11.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a1f4f4b0f77702489936a02a85ab84b2cf451b8a923cabcd439ccdaf0e2c16b
+ size 33555612
ckpts/universal/global_step80/zero/11.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7eb75f79b6896de40267218ec3205339d3e51d69793c04d735c9fd76a1ffcffa
+ size 33555627
ckpts/universal/global_step80/zero/11.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f25b44c899c9833943cbc92ad671f8164d5745afbd19bddb94009d0901de88aa
+ size 33555533
ckpts/universal/global_step80/zero/20.mlp.dense_4h_to_h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3bf29a714349432742c9fc0c2b459733143e9d87982278832f1b590d0291ccc0
+ size 33555612
ckpts/universal/global_step80/zero/20.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:97c73703a571daea25a99ccb3d0acd495b011669e33817b47bed50252a835d9a
+ size 33555627
ckpts/universal/global_step80/zero/20.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:263d3e73c46c5743ad6c7e7a7621d10d8a3ae3126a105277f3e6432799be4e4d
+ size 33555612
ckpts/universal/global_step80/zero/20.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1de0fdfeb79772de997d465267e049c42acc5ccf6adfff34db297909ab1f1799
+ size 33555627
ckpts/universal/global_step80/zero/20.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:49b0d4a768057f26c45d8243b43a907be486780d17497edcc8800c9cf04f08a6
+ size 33555533
ckpts/universal/global_step80/zero/22.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f0697e5b173c0cbbbacd70a14f0a0d55fdca3966434adebc6a17794bc9e035c
+ size 33555612
ckpts/universal/global_step80/zero/22.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b17b4be9775fdadf87cdd2999b2330055dc158f9e30031af0d58bc30667a5a2
+ size 33555627
ckpts/universal/global_step80/zero/22.mlp.dense_h_to_4h_swiglu.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e979759688b070a3c76f1015491fb0279f8ef614188dab89a21f9d8be3b038a4
+ size 33555533
ckpts/universal/global_step80/zero/24.input_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:12094f6f109942a2134efc5514383e33d9115d48b80093b06ef4448f159e4366
+ size 9372
ckpts/universal/global_step80/zero/24.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e9863024cd1a2a898b72fdf5e8dd904f94268afdce6cd517ebc750e7213b47a6
+ size 9387
ckpts/universal/global_step80/zero/24.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e271901b957595f7153b6df165a2e5d78c759237f3ec6ee1327c4c31a2f57a75
+ size 9293
ckpts/universal/global_step80/zero/25.attention.dense.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be05e2b113e995581a8230108fade1332dc7b66303a6230f59bb98237684396b
+ size 16778396
ckpts/universal/global_step80/zero/25.attention.dense.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f3ffc4438a34dffa44ce9ba0b09fdae85048201397669658a191faac0fc761b
+ size 16778411
ckpts/universal/global_step80/zero/25.attention.dense.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:247c618ab08a838420025639a0f797e27b0e09b00849bfb1f403433c69943a30
+ size 16778317
ckpts/universal/global_step80/zero/26.post_attention_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b92903fa43945373a4b7db23e876cb4551282d19da6a3e8ce4c5bfa78cf4a871
+ size 9372
ckpts/universal/global_step80/zero/26.post_attention_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d311289a11a7670d7137af2e4deb664fc69416f6e2424afa104b8e4f859fd025
+ size 9387
ckpts/universal/global_step80/zero/26.post_attention_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9618128e1ee81a02aad03b9591207a3a94d33572eb1ad99cf446ed3cf1283a05
+ size 9293
ckpts/universal/global_step80/zero/5.input_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc38b202dfb63596f2c95baf643a31411661e95949084b5fcd9e962f3e2924c4
+ size 9372
ckpts/universal/global_step80/zero/5.input_layernorm.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07be5e882c7a21fcb718a31c148002f7dd546c784b7dcdc21565a40e71dd970f
+ size 9387
ckpts/universal/global_step80/zero/5.input_layernorm.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c4aa254b9048ea1aefb2c821f2a22eb35a30bb0d1b61298904bd4542edb9961f
+ size 9293
ckpts/universal/global_step80/zero/7.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0da005f8af6b09e8a6d9560cfe1b34dace463a7751ce4b3180ed7940cd3e9e4b
+ size 33555627
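Every .pt entry above is committed through Git LFS, so each diff adds only the three-line pointer (version, oid sha256, size in bytes) rather than the optimizer state itself. A minimal sketch of reading such a pointer from a checkout, using a hypothetical helper that is not part of this repository:

# Sketch only: parse a Git LFS pointer file like the ones added above.
# The example path is illustrative; any of the committed .pt pointers would do.
def read_lfs_pointer(path):
    fields = {}
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    # Keys expected from the pointer format shown above: version, oid, size.
    return fields["version"], fields["oid"], int(fields["size"])

# Example call (hypothetical local checkout):
# read_lfs_pointer("ckpts/universal/global_step80/zero/5.input_layernorm.weight/fp32.pt")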
venv/lib/python3.10/site-packages/ninja/__init__.py ADDED
@@ -0,0 +1,55 @@
+ # -*- coding: utf-8 -*-
+ import os
+ import platform
+ import subprocess
+ import sys
+
+ from ._version import version as __version__
+
+ __all__ = ["__version__", "DATA", "BIN_DIR", "ninja"]
+
+
+ def __dir__():
+     return __all__
+
+
+ try:
+     from .ninja_syntax import Writer, escape, expand
+ except ImportError:
+     # Support importing `ninja_syntax` from the source tree
+     if not os.path.exists(
+             os.path.join(os.path.dirname(__file__), 'ninja_syntax.py')):
+         sys.path.insert(0, os.path.abspath(os.path.join(
+             os.path.dirname(__file__), '../../Ninja-src/misc')))
+     from ninja_syntax import Writer, escape, expand  # noqa: F401
+
+ DATA = os.path.join(os.path.dirname(__file__), 'data')
+
+ # Support running tests from the source tree
+ if not os.path.exists(DATA):
+     from skbuild.constants import CMAKE_INSTALL_DIR as SKBUILD_CMAKE_INSTALL_DIR
+     from skbuild.constants import set_skbuild_plat_name
+
+     if platform.system().lower() == "darwin":
+         # Since building the project specifying --plat-name or CMAKE_OSX_* variables
+         # leads to different SKBUILD_DIR, the code below attempt to guess the most
+         # likely plat-name.
+         _skbuild_dirs = os.listdir(os.path.join(os.path.dirname(__file__), '..', '..', '_skbuild'))
+         if _skbuild_dirs:
+             _likely_plat_name = '-'.join(_skbuild_dirs[0].split('-')[:3])
+             set_skbuild_plat_name(_likely_plat_name)
+
+     _data = os.path.abspath(os.path.join(
+         os.path.dirname(__file__), '..', '..', SKBUILD_CMAKE_INSTALL_DIR(), 'src/ninja/data'))
+     if os.path.exists(_data):
+         DATA = _data
+
+ BIN_DIR = os.path.join(DATA, 'bin')
+
+
+ def _program(name, args):
+     return subprocess.call([os.path.join(BIN_DIR, name)] + args, close_fds=False)
+
+
+ def ninja():
+     raise SystemExit(_program('ninja', sys.argv[1:]))
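The module above resolves the bundled ninja executable under BIN_DIR and dispatches to it with the caller's arguments. A short usage sketch (nothing below is part of the committed file):

# Sketch only: locate and run the ninja binary bundled with the package above.
import os
import subprocess

import ninja

ninja_exe = os.path.join(ninja.BIN_DIR, "ninja")
print("ninja", ninja.__version__, "->", ninja_exe)
subprocess.check_call([ninja_exe, "--version"])
# The `python -m ninja` entry point in __main__.py below performs the same
# dispatch via _program() and sys.argv.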
venv/lib/python3.10/site-packages/ninja/__main__.py ADDED
@@ -0,0 +1,5 @@
+ # -*- coding: utf-8 -*-
+ from ninja import ninja
+
+ if __name__ == '__main__':
+     ninja()
venv/lib/python3.10/site-packages/ninja/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.59 kB).
 
venv/lib/python3.10/site-packages/ninja/__pycache__/__main__.cpython-310.pyc ADDED
Binary file (245 Bytes).
 
venv/lib/python3.10/site-packages/ninja/__pycache__/_version.cpython-310.pyc ADDED
Binary file (504 Bytes).
 
venv/lib/python3.10/site-packages/ninja/__pycache__/ninja_syntax.cpython-310.pyc ADDED
Binary file (5.97 kB).
 
venv/lib/python3.10/site-packages/ninja/_version.py ADDED
@@ -0,0 +1,16 @@
+ # file generated by setuptools_scm
+ # don't change, don't track in version control
+ TYPE_CHECKING = False
+ if TYPE_CHECKING:
+     from typing import Tuple, Union
+     VERSION_TUPLE = Tuple[Union[int, str], ...]
+ else:
+     VERSION_TUPLE = object
+
+ version: str
+ __version__: str
+ __version_tuple__: VERSION_TUPLE
+ version_tuple: VERSION_TUPLE
+
+ __version__ = version = '1.11.1.1'
+ __version_tuple__ = version_tuple = (1, 11, 1, 1)
venv/lib/python3.10/site-packages/ninja/data/bin/ninja ADDED
Binary file (753 kB).
 
venv/lib/python3.10/site-packages/ninja/ninja_syntax.py ADDED
@@ -0,0 +1,199 @@
+ #!/usr/bin/python
+
+ # Copyright 2011 Google Inc. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Python module for generating .ninja files.
+
+ Note that this is emphatically not a required piece of Ninja; it's
+ just a helpful utility for build-file-generation systems that already
+ use Python.
+ """
+
+ import re
+ import textwrap
+
+ def escape_path(word):
+     return word.replace('$ ', '$$ ').replace(' ', '$ ').replace(':', '$:')
+
+ class Writer(object):
+     def __init__(self, output, width=78):
+         self.output = output
+         self.width = width
+
+     def newline(self):
+         self.output.write('\n')
+
+     def comment(self, text):
+         for line in textwrap.wrap(text, self.width - 2, break_long_words=False,
+                                   break_on_hyphens=False):
+             self.output.write('# ' + line + '\n')
+
+     def variable(self, key, value, indent=0):
+         if value is None:
+             return
+         if isinstance(value, list):
+             value = ' '.join(filter(None, value))  # Filter out empty strings.
+         self._line('%s = %s' % (key, value), indent)
+
+     def pool(self, name, depth):
+         self._line('pool %s' % name)
+         self.variable('depth', depth, indent=1)
+
+     def rule(self, name, command, description=None, depfile=None,
+              generator=False, pool=None, restat=False, rspfile=None,
+              rspfile_content=None, deps=None):
+         self._line('rule %s' % name)
+         self.variable('command', command, indent=1)
+         if description:
+             self.variable('description', description, indent=1)
+         if depfile:
+             self.variable('depfile', depfile, indent=1)
+         if generator:
+             self.variable('generator', '1', indent=1)
+         if pool:
+             self.variable('pool', pool, indent=1)
+         if restat:
+             self.variable('restat', '1', indent=1)
+         if rspfile:
+             self.variable('rspfile', rspfile, indent=1)
+         if rspfile_content:
+             self.variable('rspfile_content', rspfile_content, indent=1)
+         if deps:
+             self.variable('deps', deps, indent=1)
+
+     def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
+               variables=None, implicit_outputs=None, pool=None, dyndep=None):
+         outputs = as_list(outputs)
+         out_outputs = [escape_path(x) for x in outputs]
+         all_inputs = [escape_path(x) for x in as_list(inputs)]
+
+         if implicit:
+             implicit = [escape_path(x) for x in as_list(implicit)]
+             all_inputs.append('|')
+             all_inputs.extend(implicit)
+         if order_only:
+             order_only = [escape_path(x) for x in as_list(order_only)]
+             all_inputs.append('||')
+             all_inputs.extend(order_only)
+         if implicit_outputs:
+             implicit_outputs = [escape_path(x)
+                                 for x in as_list(implicit_outputs)]
+             out_outputs.append('|')
+             out_outputs.extend(implicit_outputs)
+
+         self._line('build %s: %s' % (' '.join(out_outputs),
+                                      ' '.join([rule] + all_inputs)))
+         if pool is not None:
+             self._line('  pool = %s' % pool)
+         if dyndep is not None:
+             self._line('  dyndep = %s' % dyndep)
+
+         if variables:
+             if isinstance(variables, dict):
+                 iterator = iter(variables.items())
+             else:
+                 iterator = iter(variables)
+
+             for key, val in iterator:
+                 self.variable(key, val, indent=1)
+
+         return outputs
+
+     def include(self, path):
+         self._line('include %s' % path)
+
+     def subninja(self, path):
+         self._line('subninja %s' % path)
+
+     def default(self, paths):
+         self._line('default %s' % ' '.join(as_list(paths)))
+
+     def _count_dollars_before_index(self, s, i):
+         """Returns the number of '$' characters right in front of s[i]."""
+         dollar_count = 0
+         dollar_index = i - 1
+         while dollar_index > 0 and s[dollar_index] == '$':
+             dollar_count += 1
+             dollar_index -= 1
+         return dollar_count
+
+     def _line(self, text, indent=0):
+         """Write 'text' word-wrapped at self.width characters."""
+         leading_space = '  ' * indent
+         while len(leading_space) + len(text) > self.width:
+             # The text is too wide; wrap if possible.
+
+             # Find the rightmost space that would obey our width constraint and
+             # that's not an escaped space.
+             available_space = self.width - len(leading_space) - len(' $')
+             space = available_space
+             while True:
+                 space = text.rfind(' ', 0, space)
+                 if (space < 0 or
+                         self._count_dollars_before_index(text, space) % 2 == 0):
+                     break
+
+             if space < 0:
+                 # No such space; just use the first unescaped space we can find.
+                 space = available_space - 1
+                 while True:
+                     space = text.find(' ', space + 1)
+                     if (space < 0 or
+                             self._count_dollars_before_index(text, space) % 2 == 0):
+                         break
+                 if space < 0:
+                     # Give up on breaking.
+                     break
+
+             self.output.write(leading_space + text[0:space] + ' $\n')
+             text = text[space+1:]
+
+             # Subsequent lines are continuations, so indent them.
+             leading_space = '  ' * (indent+2)
+
+         self.output.write(leading_space + text + '\n')
+
+     def close(self):
+         self.output.close()
+
+
+ def as_list(input):
+     if input is None:
+         return []
+     if isinstance(input, list):
+         return input
+     return [input]
+
+
+ def escape(string):
+     """Escape a string such that it can be embedded into a Ninja file without
+     further interpretation."""
+     assert '\n' not in string, 'Ninja syntax does not allow newlines'
+     # We only have one special metacharacter: '$'.
+     return string.replace('$', '$$')
+
+
+ def expand(string, vars, local_vars={}):
+     """Expand a string containing $vars as Ninja would.
+
+     Note: doesn't handle the full Ninja variable syntax, but it's enough
+     to make configure.py's use of it work.
+     """
+     def exp(m):
+         var = m.group(1)
+         if var == '$':
+             return '$'
+         return local_vars.get(var, vars.get(var, ''))
+     return re.sub(r'\$(\$|\w*)', exp, string)
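ninja_syntax.Writer above is a small emitter for Ninja build files. A short usage sketch; the rule, command, and file names are illustrative only:

# Sketch only: generate a tiny build.ninja with the Writer shown above.
from ninja.ninja_syntax import Writer

with open("build.ninja", "w") as fh:
    writer = Writer(fh)
    writer.comment("Generated by a build-file-generation script.")
    writer.variable("cflags", "-O2 -Wall")
    writer.rule("cc", command="gcc $cflags -c $in -o $out",
                description="CC $out")
    writer.build("hello.o", "cc", inputs="hello.c")
    writer.default("hello.o")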
venv/lib/python3.10/site-packages/ninja/py.typed ADDED
File without changes
venv/lib/python3.10/site-packages/nvidia/cuda_nvrtc/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/nvidia/cuda_nvrtc/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (185 Bytes).
 
venv/lib/python3.10/site-packages/nvidia/cuda_nvrtc/include/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/nvidia/cuda_nvrtc/include/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (193 Bytes).
 
venv/lib/python3.10/site-packages/nvidia/cuda_nvrtc/include/nvrtc.h ADDED
@@ -0,0 +1,845 @@
1
+ //
2
+ // NVIDIA_COPYRIGHT_BEGIN
3
+ //
4
+ // Copyright (c) 2014-2023, NVIDIA CORPORATION. All rights reserved.
5
+ //
6
+ // NVIDIA CORPORATION and its licensors retain all intellectual property
7
+ // and proprietary rights in and to this software, related documentation
8
+ // and any modifications thereto. Any use, reproduction, disclosure or
9
+ // distribution of this software and related documentation without an express
10
+ // license agreement from NVIDIA CORPORATION is strictly prohibited.
11
+ //
12
+ // NVIDIA_COPYRIGHT_END
13
+ //
14
+
15
+ #ifndef __NVRTC_H__
16
+ #define __NVRTC_H__
17
+
18
+ #ifdef __cplusplus
19
+ extern "C" {
20
+ #endif /* __cplusplus */
21
+
22
+ #include <stdlib.h>
23
+
24
+
25
+ /*************************************************************************//**
26
+ *
27
+ * \defgroup error Error Handling
28
+ *
29
+ * NVRTC defines the following enumeration type and function for API call
30
+ * error handling.
31
+ *
32
+ ****************************************************************************/
33
+
34
+
35
+ /**
36
+ * \ingroup error
37
+ * \brief The enumerated type nvrtcResult defines API call result codes.
38
+ * NVRTC API functions return nvrtcResult to indicate the call
39
+ * result.
40
+ */
41
+ typedef enum {
42
+ NVRTC_SUCCESS = 0,
43
+ NVRTC_ERROR_OUT_OF_MEMORY = 1,
44
+ NVRTC_ERROR_PROGRAM_CREATION_FAILURE = 2,
45
+ NVRTC_ERROR_INVALID_INPUT = 3,
46
+ NVRTC_ERROR_INVALID_PROGRAM = 4,
47
+ NVRTC_ERROR_INVALID_OPTION = 5,
48
+ NVRTC_ERROR_COMPILATION = 6,
49
+ NVRTC_ERROR_BUILTIN_OPERATION_FAILURE = 7,
50
+ NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION = 8,
51
+ NVRTC_ERROR_NO_LOWERED_NAMES_BEFORE_COMPILATION = 9,
52
+ NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID = 10,
53
+ NVRTC_ERROR_INTERNAL_ERROR = 11,
54
+ NVRTC_ERROR_TIME_FILE_WRITE_FAILED = 12
55
+ } nvrtcResult;
56
+
57
+
58
+ /**
59
+ * \ingroup error
60
+ * \brief nvrtcGetErrorString is a helper function that returns a string
61
+ * describing the given nvrtcResult code, e.g., NVRTC_SUCCESS to
62
+ * \c "NVRTC_SUCCESS".
63
+ * For unrecognized enumeration values, it returns
64
+ * \c "NVRTC_ERROR unknown".
65
+ *
66
+ * \param [in] result CUDA Runtime Compilation API result code.
67
+ * \return Message string for the given #nvrtcResult code.
68
+ */
69
+ const char *nvrtcGetErrorString(nvrtcResult result);
70
+
71
+
72
+ /*************************************************************************//**
73
+ *
74
+ * \defgroup query General Information Query
75
+ *
76
+ * NVRTC defines the following function for general information query.
77
+ *
78
+ ****************************************************************************/
79
+
80
+
81
+ /**
82
+ * \ingroup query
83
+ * \brief nvrtcVersion sets the output parameters \p major and \p minor
84
+ * with the CUDA Runtime Compilation version number.
85
+ *
86
+ * \param [out] major CUDA Runtime Compilation major version number.
87
+ * \param [out] minor CUDA Runtime Compilation minor version number.
88
+ * \return
89
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
90
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
91
+ *
92
+ */
93
+ nvrtcResult nvrtcVersion(int *major, int *minor);
94
+
95
+
96
+ /**
97
+ * \ingroup query
98
+ * \brief nvrtcGetNumSupportedArchs sets the output parameter \p numArchs
99
+ * with the number of architectures supported by NVRTC. This can
100
+ * then be used to pass an array to ::nvrtcGetSupportedArchs to
101
+ * get the supported architectures.
102
+ *
103
+ * \param [out] numArchs number of supported architectures.
104
+ * \return
105
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
106
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
107
+ *
108
+ * see ::nvrtcGetSupportedArchs
109
+ */
110
+ nvrtcResult nvrtcGetNumSupportedArchs(int* numArchs);
111
+
112
+
113
+ /**
114
+ * \ingroup query
115
+ * \brief nvrtcGetSupportedArchs populates the array passed via the output parameter
116
+ * \p supportedArchs with the architectures supported by NVRTC. The array is
117
+ * sorted in the ascending order. The size of the array to be passed can be
118
+ * determined using ::nvrtcGetNumSupportedArchs.
119
+ *
120
+ * \param [out] supportedArchs sorted array of supported architectures.
121
+ * \return
122
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
123
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
124
+ *
125
+ * see ::nvrtcGetNumSupportedArchs
126
+ */
127
+ nvrtcResult nvrtcGetSupportedArchs(int* supportedArchs);
128
+
129
+
130
+ /*************************************************************************//**
131
+ *
132
+ * \defgroup compilation Compilation
133
+ *
134
+ * NVRTC defines the following type and functions for actual compilation.
135
+ *
136
+ ****************************************************************************/
137
+
138
+
139
+ /**
140
+ * \ingroup compilation
141
+ * \brief nvrtcProgram is the unit of compilation, and an opaque handle for
142
+ * a program.
143
+ *
144
+ * To compile a CUDA program string, an instance of nvrtcProgram must be
145
+ * created first with ::nvrtcCreateProgram, then compiled with
146
+ * ::nvrtcCompileProgram.
147
+ */
148
+ typedef struct _nvrtcProgram *nvrtcProgram;
149
+
150
+
151
+ /**
152
+ * \ingroup compilation
153
+ * \brief nvrtcCreateProgram creates an instance of nvrtcProgram with the
154
+ * given input parameters, and sets the output parameter \p prog with
155
+ * it.
156
+ *
157
+ * \param [out] prog CUDA Runtime Compilation program.
158
+ * \param [in] src CUDA program source.
159
+ * \param [in] name CUDA program name.\n
160
+ * \p name can be \c NULL; \c "default_program" is
161
+ * used when \p name is \c NULL or "".
162
+ * \param [in] numHeaders Number of headers used.\n
163
+ * \p numHeaders must be greater than or equal to 0.
164
+ * \param [in] headers Sources of the headers.\n
165
+ * \p headers can be \c NULL when \p numHeaders is
166
+ * 0.
167
+ * \param [in] includeNames Name of each header by which they can be
168
+ * included in the CUDA program source.\n
169
+ * \p includeNames can be \c NULL when \p numHeaders
170
+ * is 0. These headers must be included with the exact
171
+ * names specified here.
172
+ * \return
173
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
174
+ * - \link #nvrtcResult NVRTC_ERROR_OUT_OF_MEMORY \endlink
175
+ * - \link #nvrtcResult NVRTC_ERROR_PROGRAM_CREATION_FAILURE \endlink
176
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
177
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
178
+ *
179
+ * \see ::nvrtcDestroyProgram
180
+ */
181
+ nvrtcResult nvrtcCreateProgram(nvrtcProgram *prog,
182
+ const char *src,
183
+ const char *name,
184
+ int numHeaders,
185
+ const char * const *headers,
186
+ const char * const *includeNames);
187
+
188
+
189
+ /**
190
+ * \ingroup compilation
191
+ * \brief nvrtcDestroyProgram destroys the given program.
192
+ *
193
+ * \param [in] prog CUDA Runtime Compilation program.
194
+ * \return
195
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
196
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
197
+ *
198
+ * \see ::nvrtcCreateProgram
199
+ */
200
+ nvrtcResult nvrtcDestroyProgram(nvrtcProgram *prog);
201
+
202
+
203
+ /**
204
+ * \ingroup compilation
205
+ * \brief nvrtcCompileProgram compiles the given program.
206
+ *
207
+ * \param [in] prog CUDA Runtime Compilation program.
208
+ * \param [in] numOptions Number of compiler options passed.
209
+ * \param [in] options Compiler options in the form of C string array.\n
210
+ * \p options can be \c NULL when \p numOptions is 0.
211
+ *
212
+ * \return
213
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
214
+ * - \link #nvrtcResult NVRTC_ERROR_OUT_OF_MEMORY \endlink
215
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
216
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
217
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_OPTION \endlink
218
+ * - \link #nvrtcResult NVRTC_ERROR_COMPILATION \endlink
219
+ * - \link #nvrtcResult NVRTC_ERROR_BUILTIN_OPERATION_FAILURE \endlink
220
+ * - \link #nvrtcResult NVRTC_ERROR_TIME_FILE_WRITE_FAILED \endlink
221
+ *
222
+ * It supports compile options listed in \ref options.
223
+ */
224
+ nvrtcResult nvrtcCompileProgram(nvrtcProgram prog,
225
+ int numOptions, const char * const *options);
226
+
227
+
228
+ /**
229
+ * \ingroup compilation
230
+ * \brief nvrtcGetPTXSize sets the value of \p ptxSizeRet with the size of the PTX
231
+ * generated by the previous compilation of \p prog (including the
232
+ * trailing \c NULL).
233
+ *
234
+ * \param [in] prog CUDA Runtime Compilation program.
235
+ * \param [out] ptxSizeRet Size of the generated PTX (including the trailing
236
+ * \c NULL).
237
+ * \return
238
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
239
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
240
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
241
+ *
242
+ * \see ::nvrtcGetPTX
243
+ */
244
+ nvrtcResult nvrtcGetPTXSize(nvrtcProgram prog, size_t *ptxSizeRet);
245
+
246
+
247
+ /**
248
+ * \ingroup compilation
249
+ * \brief nvrtcGetPTX stores the PTX generated by the previous compilation
250
+ * of \p prog in the memory pointed by \p ptx.
251
+ *
252
+ * \param [in] prog CUDA Runtime Compilation program.
253
+ * \param [out] ptx Compiled result.
254
+ * \return
255
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
256
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
257
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
258
+ *
259
+ * \see ::nvrtcGetPTXSize
260
+ */
261
+ nvrtcResult nvrtcGetPTX(nvrtcProgram prog, char *ptx);
262
+
263
+
264
+ /**
265
+ * \ingroup compilation
266
+ * \brief nvrtcGetCUBINSize sets the value of \p cubinSizeRet with the size of the cubin
267
+ * generated by the previous compilation of \p prog. The value of
268
+ * cubinSizeRet is set to 0 if the value specified to \c -arch is a
269
+ * virtual architecture instead of an actual architecture.
270
+ *
271
+ * \param [in] prog CUDA Runtime Compilation program.
272
+ * \param [out] cubinSizeRet Size of the generated cubin.
273
+ * \return
274
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
275
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
276
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
277
+ *
278
+ * \see ::nvrtcGetCUBIN
279
+ */
280
+ nvrtcResult nvrtcGetCUBINSize(nvrtcProgram prog, size_t *cubinSizeRet);
281
+
282
+
283
+ /**
284
+ * \ingroup compilation
285
+ * \brief nvrtcGetCUBIN stores the cubin generated by the previous compilation
286
+ * of \p prog in the memory pointed by \p cubin. No cubin is available
287
+ * if the value specified to \c -arch is a virtual architecture instead
288
+ * of an actual architecture.
289
+ *
290
+ * \param [in] prog CUDA Runtime Compilation program.
291
+ * \param [out] cubin Compiled and assembled result.
292
+ * \return
293
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
294
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
295
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
296
+ *
297
+ * \see ::nvrtcGetCUBINSize
298
+ */
299
+ nvrtcResult nvrtcGetCUBIN(nvrtcProgram prog, char *cubin);
300
+
301
+
302
+ #if defined(_WIN32)
303
+ # define __DEPRECATED__(msg) __declspec(deprecated(msg))
304
+ #elif (defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 5 && !defined(__clang__))))
305
+ # define __DEPRECATED__(msg) __attribute__((deprecated))
306
+ #elif (defined(__GNUC__))
307
+ # define __DEPRECATED__(msg) __attribute__((deprecated(msg)))
308
+ #else
309
+ # define __DEPRECATED__(msg)
310
+ #endif
311
+
312
+ /**
313
+ * \ingroup compilation
314
+ * \brief
315
+ * DEPRECATION NOTICE: This function will be removed in a future release. Please use
316
+ * nvrtcGetLTOIRSize (and nvrtcGetLTOIR) instead.
317
+ */
318
+ __DEPRECATED__("This function will be removed in a future release. Please use nvrtcGetLTOIRSize instead")
319
+ nvrtcResult nvrtcGetNVVMSize(nvrtcProgram prog, size_t *nvvmSizeRet);
320
+
321
+ /**
322
+ * \ingroup compilation
323
+ * \brief
324
+ * DEPRECATION NOTICE: This function will be removed in a future release. Please use
325
+ * nvrtcGetLTOIR (and nvrtcGetLTOIRSize) instead.
326
+ */
327
+ __DEPRECATED__("This function will be removed in a future release. Please use nvrtcGetLTOIR instead")
328
+ nvrtcResult nvrtcGetNVVM(nvrtcProgram prog, char *nvvm);
329
+
330
+ #undef __DEPRECATED__
331
+
332
+ /**
333
+ * \ingroup compilation
334
+ * \brief nvrtcGetLTOIRSize sets the value of \p LTOIRSizeRet with the size of the LTO IR
335
+ * generated by the previous compilation of \p prog. The value of
336
+ * LTOIRSizeRet is set to 0 if the program was not compiled with
337
+ * \c -dlto.
338
+ *
339
+ * \param [in] prog CUDA Runtime Compilation program.
340
+ * \param [out] LTOIRSizeRet Size of the generated LTO IR.
341
+ * \return
342
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
343
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
344
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
345
+ *
346
+ * \see ::nvrtcGetLTOIR
347
+ */
348
+ nvrtcResult nvrtcGetLTOIRSize(nvrtcProgram prog, size_t *LTOIRSizeRet);
349
+
350
+
351
+ /**
352
+ * \ingroup compilation
353
+ * \brief nvrtcGetLTOIR stores the LTO IR generated by the previous compilation
354
+ * of \p prog in the memory pointed by \p LTOIR. No LTO IR is available
355
+ * if the program was compiled without \c -dlto.
356
+ *
357
+ * \param [in] prog CUDA Runtime Compilation program.
358
+ * \param [out] LTOIR Compiled result.
359
+ * \return
360
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
361
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
362
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
363
+ *
364
+ * \see ::nvrtcGetLTOIRSize
365
+ */
366
+ nvrtcResult nvrtcGetLTOIR(nvrtcProgram prog, char *LTOIR);
367
+
368
+
369
+ /**
370
+ * \ingroup compilation
371
+ * \brief nvrtcGetOptiXIRSize sets the value of \p optixirSizeRet with the size of the OptiX IR
372
+ * generated by the previous compilation of \p prog. The value of
373
+ * nvrtcGetOptiXIRSize is set to 0 if the program was compiled with
374
+ * options incompatible with OptiX IR generation.
375
+ *
376
+ * \param [in] prog CUDA Runtime Compilation program.
377
+ * \param [out] optixirSizeRet Size of the generated LTO IR.
378
+ * \return
379
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
380
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
381
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
382
+ *
383
+ * \see ::nvrtcGetOptiXIR
384
+ */
385
+ nvrtcResult nvrtcGetOptiXIRSize(nvrtcProgram prog, size_t *optixirSizeRet);
386
+
387
+
388
+ /**
389
+ * \ingroup compilation
390
+ * \brief nvrtcGetOptiXIR stores the OptiX IR generated by the previous compilation
391
+ * of \p prog in the memory pointed by \p optixir. No OptiX IR is available
392
+ * if the program was compiled with options incompatible with OptiX IR generation.
393
+ *
394
+ * \param [in] prog CUDA Runtime Compilation program.
395
+ * \param [out] Optix IR Compiled result.
396
+ * \return
397
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
398
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
399
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
400
+ *
401
+ * \see ::nvrtcGetOptiXIRSize
402
+ */
403
+ nvrtcResult nvrtcGetOptiXIR(nvrtcProgram prog, char *optixir);
404
+
405
+ /**
406
+ * \ingroup compilation
407
+ * \brief nvrtcGetProgramLogSize sets \p logSizeRet with the size of the
408
+ * log generated by the previous compilation of \p prog (including the
409
+ * trailing \c NULL).
410
+ *
411
+ * Note that compilation log may be generated with warnings and informative
412
+ * messages, even when the compilation of \p prog succeeds.
413
+ *
414
+ * \param [in] prog CUDA Runtime Compilation program.
415
+ * \param [out] logSizeRet Size of the compilation log
416
+ * (including the trailing \c NULL).
417
+ * \return
418
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
419
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
420
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
421
+ *
422
+ * \see ::nvrtcGetProgramLog
423
+ */
424
+ nvrtcResult nvrtcGetProgramLogSize(nvrtcProgram prog, size_t *logSizeRet);
425
+
426
+
427
+ /**
428
+ * \ingroup compilation
429
+ * \brief nvrtcGetProgramLog stores the log generated by the previous
430
+ * compilation of \p prog in the memory pointed by \p log.
431
+ *
432
+ * \param [in] prog CUDA Runtime Compilation program.
433
+ * \param [out] log Compilation log.
434
+ * \return
435
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
436
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \endlink
437
+ * - \link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \endlink
438
+ *
439
+ * \see ::nvrtcGetProgramLogSize
440
+ */
441
+ nvrtcResult nvrtcGetProgramLog(nvrtcProgram prog, char *log);
442
+
443
+
444
+ /**
445
+ * \ingroup compilation
446
+ * \brief nvrtcAddNameExpression notes the given name expression
447
+ * denoting the address of a __global__ function
448
+ * or __device__/__constant__ variable.
449
+ *
450
+ * The identical name expression string must be provided on a subsequent
451
+ * call to nvrtcGetLoweredName to extract the lowered name.
452
+ * \param [in] prog CUDA Runtime Compilation program.
453
+ * \param [in] name_expression constant expression denoting the address of
454
+ * a __global__ function or __device__/__constant__ variable.
455
+ * \return
456
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
457
+ * - \link #nvrtcResult NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION \endlink
458
+ *
459
+ * \see ::nvrtcGetLoweredName
460
+ */
461
+ nvrtcResult nvrtcAddNameExpression(nvrtcProgram prog,
462
+ const char * const name_expression);
463
+
464
+ /**
465
+ * \ingroup compilation
466
+ * \brief nvrtcGetLoweredName extracts the lowered (mangled) name
467
+ * for a __global__ function or __device__/__constant__ variable,
468
+ * and updates *lowered_name to point to it. The memory containing
469
+ * the name is released when the NVRTC program is destroyed by
470
+ * nvrtcDestroyProgram.
471
+ * The identical name expression must have been previously
472
+ * provided to nvrtcAddNameExpression.
473
+ *
474
+ * \param [in] prog CUDA Runtime Compilation program.
475
+ * \param [in] name_expression constant expression denoting the address of
476
+ * a __global__ function or __device__/__constant__ variable.
477
+ * \param [out] lowered_name initialized by the function to point to a
478
+ * C string containing the lowered (mangled)
479
+ * name corresponding to the provided name expression.
480
+ * \return
481
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
482
+ * - \link #nvrtcResult NVRTC_ERROR_NO_LOWERED_NAMES_BEFORE_COMPILATION \endlink
483
+ * - \link #nvrtcResult NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID \endlink
484
+ *
485
+ * \see ::nvrtcAddNameExpression
486
+ */
487
+ nvrtcResult nvrtcGetLoweredName(nvrtcProgram prog,
488
+ const char *const name_expression,
489
+ const char** lowered_name);
490
+
491
+
492
+ /**
493
+ * \defgroup options Supported Compile Options
494
+ *
495
+ * NVRTC supports the compile options below.
496
+ * Option names with two preceding dashs (\c --) are long option names and
497
+ * option names with one preceding dash (\c -) are short option names.
498
+ * Short option names can be used instead of long option names.
499
+ * When a compile option takes an argument, an assignment operator (\c =)
500
+ * is used to separate the compile option argument from the compile option
501
+ * name, e.g., \c "--gpu-architecture=compute_60".
502
+ * Alternatively, the compile option name and the argument can be specified in
503
+ * separate strings without an assignment operator, .e.g,
504
+ * \c "--gpu-architecture" \c "compute_60".
505
+ * Single-character short option names, such as \c -D, \c -U, and \c -I, do
506
+ * not require an assignment operator, and the compile option name and the
507
+ * argument can be present in the same string with or without spaces between
508
+ * them.
509
+ * For instance, \c "-D=<def>", \c "-D<def>", and \c "-D <def>" are all
510
+ * supported.
511
+ *
512
+ * The valid compiler options are:
513
+ *
514
+ * - Compilation targets
515
+ * - \c --gpu-architecture=\<arch\> (\c -arch)\n
516
+ * Specify the name of the class of GPU architectures for which the
517
+ * input must be compiled.\n
518
+ * - Valid <c>\<arch\></c>s:
519
+ * - \c compute_50
520
+ * - \c compute_52
521
+ * - \c compute_53
522
+ * - \c compute_60
523
+ * - \c compute_61
524
+ * - \c compute_62
525
+ * - \c compute_70
526
+ * - \c compute_72
527
+ * - \c compute_75
528
+ * - \c compute_80
529
+ * - \c compute_87
530
+ * - \c compute_89
531
+ * - \c compute_90
532
+ * - \c compute_90a
533
+ * - \c sm_50
534
+ * - \c sm_52
535
+ * - \c sm_53
536
+ * - \c sm_60
537
+ * - \c sm_61
538
+ * - \c sm_62
539
+ * - \c sm_70
540
+ * - \c sm_72
541
+ * - \c sm_75
542
+ * - \c sm_80
543
+ * - \c sm_87
544
+ * - \c sm_89
545
+ * - \c sm_90
546
+ * - \c sm_90a
547
+ * - Default: \c compute_52
548
+ * - Separate compilation / whole-program compilation
549
+ * - \c --device-c (\c -dc)\n
550
+ * Generate relocatable code that can be linked with other relocatable
551
+ * device code. It is equivalent to --relocatable-device-code=true.
552
+ * - \c --device-w (\c -dw)\n
553
+ * Generate non-relocatable code. It is equivalent to
554
+ * \c --relocatable-device-code=false.
555
+ * - \c --relocatable-device-code={true|false} (\c -rdc)\n
556
+ * Enable (disable) the generation of relocatable device code.
557
+ * - Default: \c false
558
+ * - \c --extensible-whole-program (\c -ewp)\n
559
+ * Do extensible whole program compilation of device code.
560
+ * - Default: \c false
561
+ * - Debugging support
562
+ * - \c --device-debug (\c -G)\n
563
+ * Generate debug information. If --dopt is not specified,
564
+ * then turns off all optimizations.
565
+ * - \c --generate-line-info (\c -lineinfo)\n
566
+ * Generate line-number information.
567
+ * - Code generation
568
+ * - \c --dopt on (\c -dopt)\n
569
+ * - \c --dopt=on \n
570
+ * Enable device code optimization. When specified along with '-G', enables
571
+ * limited debug information generation for optimized device code (currently,
572
+ * only line number information).
573
+ * When '-G' is not specified, '-dopt=on' is implicit.
574
+ * - \c --ptxas-options \<options\> (\c -Xptxas)\n
575
+ * - \c --ptxas-options=\<options\> \n
576
+ * Specify options directly to ptxas, the PTX optimizing assembler.
577
+ * - \c --maxrregcount=\<N\> (\c -maxrregcount)\n
578
+ * Specify the maximum amount of registers that GPU functions can use.
579
+ * Until a function-specific limit, a higher value will generally
580
+ * increase the performance of individual GPU threads that execute this
581
+ * function. However, because thread registers are allocated from a
582
+ * global register pool on each GPU, a higher value of this option will
583
+ * also reduce the maximum thread block size, thereby reducing the amount
584
+ * of thread parallelism. Hence, a good maxrregcount value is the result
585
+ * of a trade-off. If this option is not specified, then no maximum is
586
+ * assumed. Value less than the minimum registers required by ABI will
587
+ * be bumped up by the compiler to ABI minimum limit.
588
+ * - \c --ftz={true|false} (\c -ftz)\n
589
+ * When performing single-precision floating-point operations, flush
590
+ * denormal values to zero or preserve denormal values.
591
+ * \c --use_fast_math implies \c --ftz=true.
592
+ * - Default: \c false
593
+ * - \c --prec-sqrt={true|false} (\c -prec-sqrt)\n
594
+ * For single-precision floating-point square root, use IEEE
595
+ * round-to-nearest mode or use a faster approximation.
596
+ * \c --use_fast_math implies \c --prec-sqrt=false.
597
+ * - Default: \c true
598
+ * - \c --prec-div={true|false} (\c -prec-div)\n
599
+ * For single-precision floating-point division and reciprocals, use IEEE
600
+ * round-to-nearest mode or use a faster approximation.
601
+ * \c --use_fast_math implies \c --prec-div=false.
602
+ * - Default: \c true
603
+ * - \c --fmad={true|false} (\c -fmad)\n
604
+ * Enables (disables) the contraction of floating-point multiplies and
605
+ * adds/subtracts into floating-point multiply-add operations (FMAD,
606
+ * FFMA, or DFMA). \c --use_fast_math implies \c --fmad=true.
607
+ * - Default: \c true
608
+ * - \c --use_fast_math (\c -use_fast_math)\n
609
+ * Make use of fast math operations.
610
+ * \c --use_fast_math implies \c --ftz=true \c --prec-div=false
611
+ * \c --prec-sqrt=false \c --fmad=true.
612
+ * - \c --extra-device-vectorization (\c -extra-device-vectorization)\n
613
+ * Enables more aggressive device code vectorization in the NVVM optimizer.
614
+ * - \c --modify-stack-limit={true|false} (\c -modify-stack-limit)\n
615
+ * On Linux, during compilation, use \c setrlimit() to increase stack size
616
+ * to maximum allowed. The limit is reset to the previous value at the
617
+ * end of compilation.
618
+ * Note: \c setrlimit() changes the value for the entire process.
619
+ * - Default: \c true
620
+ * - \c --dlink-time-opt (\c -dlto)\n
621
+ * Generate intermediate code for later link-time optimization.
622
+ * It implies \c -rdc=true.
623
+ * Note: when this option is used the nvrtcGetLTOIR API should be used,
624
+ * as PTX or Cubin will not be generated.
625
+ * - \c --gen-opt-lto (\c -gen-opt-lto)\n
626
+ * Run the optimizer passes before generating the LTO IR.
627
+ * - \c --optix-ir (\c -optix-ir)\n
628
+ * Generate OptiX IR. The Optix IR is only intended for consumption by OptiX
629
+ * through appropriate APIs. This feature is not supported with
630
+ * link-time-optimization (\c -dlto)\n.
631
+ * Note: when this option is used the nvrtcGetOptiX API should be used,
632
+ * as PTX or Cubin will not be generated.
633
+ * - Preprocessing
634
+ * - \c --define-macro=\<def\> (\c -D)\n
635
+ * \c \<def\> can be either \c \<name\> or \c \<name=definitions\>.
636
+ * - \c \<name\> \n
637
+ * Predefine \c \<name\> as a macro with definition \c 1.
638
+ * - \c \<name\>=\<definition\> \n
639
+ * The contents of \c \<definition\> are tokenized and preprocessed
640
+ * as if they appeared during translation phase three in a \c \#define
641
+ * directive. In particular, the definition will be truncated by
642
+ * embedded new line characters.
643
+ * - \c --undefine-macro=\<def\> (\c -U)\n
644
+ * Cancel any previous definition of \c \<def\>.
645
+ * - \c --include-path=\<dir\> (\c -I)\n
646
+ * Add the directory \c \<dir\> to the list of directories to be
647
+ * searched for headers. These paths are searched after the list of
648
+ * headers given to ::nvrtcCreateProgram.
649
+ * - \c --pre-include=\<header\> (\c -include)\n
650
+ * Preinclude \c \<header\> during preprocessing.
651
+ * - \c --no-source-include (\c -no-source-include)
652
+ * The preprocessor by default adds the directory of each input sources
653
+ * to the include path. This option disables this feature and only
654
+ * considers the path specified explicitly.
655
+ * - Language Dialect
656
+ * - \c --std={c++03|c++11|c++14|c++17|c++20}
657
+ * (\c -std={c++11|c++14|c++17|c++20})\n
658
+ * Set language dialect to C++03, C++11, C++14, C++17 or C++20
659
+ * - Default: \c c++17
660
+ * - \c --builtin-move-forward={true|false} (\c -builtin-move-forward)\n
661
+ * Provide builtin definitions of \c std::move and \c std::forward,
662
+ * when C++11 or later language dialect is selected.
663
+ * - Default: \c true
664
+ * - \c --builtin-initializer-list={true|false}
665
+ * (\c -builtin-initializer-list)\n
666
+ * Provide builtin definitions of \c std::initializer_list class and
667
+ * member functions when C++11 or later language dialect is selected.
668
+ * - Default: \c true
669
+ * - Misc.
670
+ * - \c --disable-warnings (\c -w)\n
671
+ * Inhibit all warning messages.
672
+ * - \c --restrict (\c -restrict)\n
673
+ * Programmer assertion that all kernel pointer parameters are restrict
674
+ * pointers.
675
+ * - \c --device-as-default-execution-space
676
+ * (\c -default-device)\n
677
+ * Treat entities with no execution space annotation as \c __device__
678
+ * entities.
679
+ * - \c --device-int128 (\c -device-int128)\n
680
+ * Allow the \c __int128 type in device code. Also causes the macro \c __CUDACC_RTC_INT128__
681
+ * to be defined.
682
+ * - \c --optimization-info=\<kind\> (\c -opt-info)\n
683
+ * Provide optimization reports for the specified kind of optimization.
684
+ * The following kind tags are supported:
685
+ * - \c inline : emit a remark when a function is inlined.
686
+ * - \c --version-ident={true|false} (\c -dQ)\n
687
+ * Embed used compiler's version info into generated PTX/CUBIN
688
+ * - Default: \c false
689
+ * - \c --display-error-number (\c -err-no)\n
690
+ * Display diagnostic number for warning messages. (Default)
691
+ * - \c --no-display-error-number (\c -no-err-no)\n
692
+ * Disables the display of a diagnostic number for warning messages.
693
+ * - \c --diag-error=<error-number>,... (\c -diag-error)\n
694
+ * Emit error for specified diagnostic message number(s). Message numbers can be separated by comma.
695
+ * - \c --diag-suppress=<error-number>,... (\c -diag-suppress)\n
696
+ * Suppress specified diagnostic message number(s). Message numbers can be separated by comma.
697
+ * - \c --diag-warn=<error-number>,... (\c -diag-warn)\n
698
+ * Emit warning for specified diagnostic message number(s). Message numbers can be separated by comma.
699
+ * - \c --brief-diagnostics={true|false} (\c -brief-diag)\n
700
+ * This option disables or enables showing source line and column info
701
+ * in a diagnostic.
702
+ * The --brief-diagnostics=true will not show the source line and column info.
703
+ * - Default: \c false
704
+ * - \c --time=<file-name> (\c -time)\n
705
+ * Generate a comma separated value table with the time taken by each compilation
706
+ * phase, and append it at the end of the file given as the option argument.
707
+ * If the file does not exist, the column headings are generated in the first row
708
+ * of the table. If the file name is '-', the timing data is written to the compilation log.
709
+ *
710
+ */
711
+
712
+
713
+ #ifdef __cplusplus
714
+ }
715
+ #endif /* __cplusplus */
716
+
717
+
718
+ /* The utility function 'nvrtcGetTypeName' is not available by default. Define
719
+ the macro 'NVRTC_GET_TYPE_NAME' to a non-zero value to make it available.
720
+ */
721
+
722
+ #if NVRTC_GET_TYPE_NAME || __DOXYGEN_ONLY__
723
+
724
+ #if NVRTC_USE_CXXABI || __clang__ || __GNUC__ || __DOXYGEN_ONLY__
725
+ #include <cxxabi.h>
726
+ #include <cstdlib>
727
+
728
+ #elif defined(_WIN32)
729
+ #include <Windows.h>
730
+ #include <DbgHelp.h>
731
+ #endif /* NVRTC_USE_CXXABI || __clang__ || __GNUC__ */
732
+
733
+
734
+ #include <string>
735
+ #include <typeinfo>
736
+
737
+ template <typename T> struct __nvrtcGetTypeName_helper_t { };
738
+
739
+ /*************************************************************************//**
740
+ *
741
+ * \defgroup hosthelper Host Helper
742
+ *
743
+ * NVRTC defines the following functions for easier interaction with host code.
744
+ *
745
+ ****************************************************************************/
746
+
747
+ /**
748
+ * \ingroup hosthelper
749
+ * \brief nvrtcGetTypeName stores the source level name of a type in the given
750
+ * std::string location.
751
+ *
752
+ * This function is only provided when the macro NVRTC_GET_TYPE_NAME is
753
+ * defined with a non-zero value. It uses abi::__cxa_demangle or UnDecorateSymbolName
754
+ * function calls to extract the type name, when using gcc/clang or cl.exe compilers,
755
+ * respectively. If the name extraction fails, it will return NVRTC_INTERNAL_ERROR,
756
+ * otherwise *result is initialized with the extracted name.
757
+ *
758
+ * Windows-specific notes:
759
+ * - nvrtcGetTypeName() is not multi-thread safe because it calls UnDecorateSymbolName(),
760
+ * which is not multi-thread safe.
761
+ * - The returned string may contain Microsoft-specific keywords such as __ptr64 and __cdecl.
762
+ *
763
+ * \param [in] tinfo: reference to object of type std::type_info for a given type.
764
+ * \param [in] result: pointer to std::string in which to store the type name.
765
+ * \return
766
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
767
+ * - \link #nvrtcResult NVRTC_ERROR_INTERNAL_ERROR \endlink
768
+ *
769
+ */
770
+ inline nvrtcResult nvrtcGetTypeName(const std::type_info &tinfo, std::string *result)
771
+ {
772
+ #if USE_CXXABI || __clang__ || __GNUC__
773
+ const char *name = tinfo.name();
774
+ int status;
775
+ char *undecorated_name = abi::__cxa_demangle(name, 0, 0, &status);
776
+ if (status == 0) {
777
+ *result = undecorated_name;
778
+ free(undecorated_name);
779
+ return NVRTC_SUCCESS;
780
+ }
781
+ #elif defined(_WIN32)
782
+ const char *name = tinfo.raw_name();
783
+ if (!name || *name != '.') {
784
+ return NVRTC_ERROR_INTERNAL_ERROR;
785
+ }
786
+ char undecorated_name[4096];
787
+ //name+1 skips over the '.' prefix
788
+ if(UnDecorateSymbolName(name+1, undecorated_name,
789
+ sizeof(undecorated_name) / sizeof(*undecorated_name),
790
+ //note: doesn't seem to work correctly without UNDNAME_NO_ARGUMENTS.
791
+ UNDNAME_NO_ARGUMENTS | UNDNAME_NAME_ONLY ) ) {
792
+ *result = undecorated_name;
793
+ return NVRTC_SUCCESS;
794
+ }
795
+ #endif /* NVRTC_USE_CXXABI || __clang__ || __GNUC__ */
796
+
797
+ return NVRTC_ERROR_INTERNAL_ERROR;
798
+ }
799
+
800
+ /**
801
+ * \ingroup hosthelper
802
+ * \brief nvrtcGetTypeName stores the source level name of the template type argument
803
+ * T in the given std::string location.
804
+ *
805
+ * This function is only provided when the macro NVRTC_GET_TYPE_NAME is
806
+ * defined with a non-zero value. It uses abi::__cxa_demangle or UnDecorateSymbolName
807
+ * function calls to extract the type name, when using gcc/clang or cl.exe compilers,
808
+ * respectively. If the name extraction fails, it will return NVRTC_ERROR_INTERNAL_ERROR,
809
+ * otherwise *result is initialized with the extracted name.
810
+ *
811
+ * Windows-specific notes:
812
+ * - nvrtcGetTypeName() is not multi-thread safe because it calls UnDecorateSymbolName(),
813
+ * which is not multi-thread safe.
814
+ * - The returned string may contain Microsoft-specific keywords such as __ptr64 and __cdecl.
815
+ *
816
+ * \param [out] result: pointer to std::string in which to store the type name.
817
+ * \return
818
+ * - \link #nvrtcResult NVRTC_SUCCESS \endlink
819
+ * - \link #nvrtcResult NVRTC_ERROR_INTERNAL_ERROR \endlink
820
+ *
821
+ */
822
+
823
+ template <typename T>
824
+ nvrtcResult nvrtcGetTypeName(std::string *result)
825
+ {
826
+ nvrtcResult res = nvrtcGetTypeName(typeid(__nvrtcGetTypeName_helper_t<T>),
827
+ result);
828
+ if (res != NVRTC_SUCCESS)
829
+ return res;
830
+
831
+ std::string repr = *result;
832
+ std::size_t idx = repr.find("__nvrtcGetTypeName_helper_t");
833
+ idx = (idx != std::string::npos) ? repr.find("<", idx) : idx;
834
+ std::size_t last_idx = repr.find_last_of('>');
835
+ if (idx == std::string::npos || last_idx == std::string::npos) {
836
+ return NVRTC_ERROR_INTERNAL_ERROR;
837
+ }
838
+ ++idx;
839
+ *result = repr.substr(idx, last_idx - idx);
840
+ return NVRTC_SUCCESS;
841
+ }
842
+
843
+ #endif /* NVRTC_GET_TYPE_NAME */
844
+
845
+ #endif /* __NVRTC_H__ */
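A minimal host-side sketch of the nvrtcGetTypeName helpers defined at the end of this header (not part of nvrtc.h itself; the MyParticle type is illustrative). NVRTC_GET_TYPE_NAME must be defined to a non-zero value before the include:

#define NVRTC_GET_TYPE_NAME 1
#include <nvrtc.h>
#include <iostream>
#include <string>

struct MyParticle { float x, y, z; };

int main() {
    std::string name;
    // Demangles typeid(__nvrtcGetTypeName_helper_t<MyParticle>) and strips the
    // helper wrapper, leaving the source-level spelling "MyParticle".
    if (nvrtcGetTypeName<MyParticle>(&name) != NVRTC_SUCCESS) return 1;
    std::cout << name << "\n";   // usable when building name expressions for NVRTC
    return 0;
}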
venv/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/nvidia/cuda_nvrtc/lib/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (189 Bytes). View file
 
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (195 Bytes). View file
 
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/builtin_types.h ADDED
@@ -0,0 +1,64 @@
1
+ /*
2
+ * Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ /*******************************************************************************
51
+ * *
52
+ * *
53
+ * *
54
+ *******************************************************************************/
55
+
56
+ #include "device_types.h"
57
+ #if !defined(__CUDACC_RTC__)
58
+ #define EXCLUDE_FROM_RTC
59
+ #include "driver_types.h"
60
+ #undef EXCLUDE_FROM_RTC
61
+ #endif /* !__CUDACC_RTC__ */
62
+ #include "surface_types.h"
63
+ #include "texture_types.h"
64
+ #include "vector_types.h"
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/common_functions.h ADDED
@@ -0,0 +1,65 @@
1
+ /*
2
+ * Copyright 1993-2018 NVIDIA Corporation. All rights reserved.
3
+ *
4
+ * NOTICE TO LICENSEE:
5
+ *
6
+ * This source code and/or documentation ("Licensed Deliverables") are
7
+ * subject to NVIDIA intellectual property rights under U.S. and
8
+ * international Copyright laws.
9
+ *
10
+ * These Licensed Deliverables contained herein is PROPRIETARY and
11
+ * CONFIDENTIAL to NVIDIA and is being provided under the terms and
12
+ * conditions of a form of NVIDIA software license agreement by and
13
+ * between NVIDIA and Licensee ("License Agreement") or electronically
14
+ * accepted by Licensee. Notwithstanding any terms or conditions to
15
+ * the contrary in the License Agreement, reproduction or disclosure
16
+ * of the Licensed Deliverables to any third party without the express
17
+ * written consent of NVIDIA is prohibited.
18
+ *
19
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
20
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
21
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
22
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
23
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
24
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
25
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
26
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
27
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
28
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
29
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
30
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
31
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
32
+ * OF THESE LICENSED DELIVERABLES.
33
+ *
34
+ * U.S. Government End Users. These Licensed Deliverables are a
35
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
36
+ * 1995), consisting of "commercial computer software" and "commercial
37
+ * computer software documentation" as such terms are used in 48
38
+ * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
39
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
40
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
41
+ * U.S. Government End Users acquire the Licensed Deliverables with
42
+ * only those rights set forth herein.
43
+ *
44
+ * Any use of the Licensed Deliverables in individual and commercial
45
+ * software must include, in the user documentation and internal
46
+ * comments to the code, the above Disclaimer and U.S. Government End
47
+ * Users Notice.
48
+ */
49
+
50
+ #if !defined(__CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__)
51
+ #if defined(_MSC_VER)
52
+ #pragma message("common_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead.")
53
+ #else
54
+ #warning "common_functions.h is an internal header file and must not be used directly. This file will be removed in a future CUDA release. Please use cuda_runtime_api.h or cuda_runtime.h instead."
55
+ #endif
56
+ #define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
57
+ #define __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H_WRAPPER__
58
+ #endif
59
+
60
+ #include "crt/common_functions.h"
61
+
62
+ #if defined(__UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H_WRAPPER__)
63
+ #undef __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
64
+ #undef __UNDEF_CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS_COMMON_FUNCTIONS_H_WRAPPER__
65
+ #endif
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/async.h ADDED
@@ -0,0 +1,452 @@
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_ASYNC_H
50
+ #define _CG_ASYNC_H
51
+
52
+ #include "helpers.h"
53
+ #include "info.h"
54
+
55
+ #include <cuda_pipeline.h>
56
+
57
+ _CG_BEGIN_NAMESPACE
58
+
59
+ namespace details {
60
+ // Groups supported by memcpy_async
61
+ template <class TyGroup>
62
+ struct _async_copy_group_supported : public _CG_STL_NAMESPACE::false_type {};
63
+
64
+ template <unsigned int Sz, typename TyPar>
65
+ struct _async_copy_group_supported<cooperative_groups::thread_block_tile<Sz, TyPar>>
66
+ : public _CG_STL_NAMESPACE::true_type {};
67
+ template <>
68
+ struct _async_copy_group_supported<cooperative_groups::coalesced_group> : public _CG_STL_NAMESPACE::true_type {};
69
+ template <>
70
+ struct _async_copy_group_supported<cooperative_groups::thread_block> : public _CG_STL_NAMESPACE::true_type {};
71
+
72
+ template <class TyGroup>
73
+ using async_copy_group_supported = _async_copy_group_supported<details::remove_qual<TyGroup>>;
74
+
75
+ // Groups that require optimization
76
+ template <class TyGroup>
77
+ struct _async_copy_optimize_tile : public _CG_STL_NAMESPACE::false_type {};
78
+
79
+ template <typename TyPar>
80
+ struct _async_copy_optimize_tile<cooperative_groups::thread_block_tile<1, TyPar>>
81
+ : public _CG_STL_NAMESPACE::false_type {};
82
+
83
+ template <unsigned int Sz, typename TyPar>
84
+ struct _async_copy_optimize_tile<cooperative_groups::thread_block_tile<Sz, TyPar>>
85
+ : public _CG_STL_NAMESPACE::true_type {};
86
+
87
+ template <class TyGroup>
88
+ using async_copy_optimize_tile = _async_copy_optimize_tile<details::remove_qual<TyGroup>>;
89
+
90
+ // SFINAE helpers for tile optimizations
91
+ template <class TyGroup>
92
+ using enable_tile_optimization =
93
+ typename _CG_STL_NAMESPACE::enable_if<async_copy_optimize_tile<TyGroup>::value, void *>::type;
94
+
95
+ template <class TyGroup>
96
+ using disable_tile_optimization =
97
+ typename _CG_STL_NAMESPACE::enable_if<!async_copy_optimize_tile<TyGroup>::value, void *>::type;
98
+
99
+ // Segment for punning to aligned types
100
+ template <unsigned int N>
101
+ struct _Segment {
102
+ int _seg[N];
103
+ };
104
+
105
+ // Trivial layout guaranteed-aligned copy-async compatible segments
106
+ template <unsigned int N>
107
+ struct Segment;
108
+ template <>
109
+ struct __align__(4) Segment<1> : public _Segment<1>{};
110
+ template <>
111
+ struct __align__(8) Segment<2> : public _Segment<2>{};
112
+ template <>
113
+ struct __align__(16) Segment<4> : public _Segment<4>{};
114
+
115
+ // Interleaved element by element copies from source to dest
116
+ template <typename TyGroup, typename TyElem>
117
+ _CG_STATIC_QUALIFIER void inline_copy(TyGroup &group, TyElem *__restrict__ dst, const TyElem *__restrict__ src,
118
+ size_t count) {
119
+ const unsigned int rank = group.thread_rank();
120
+ const unsigned int stride = group.size();
121
+
122
+ for (size_t idx = rank; idx < count; idx += stride) {
123
+ dst[idx] = src[idx];
124
+ }
125
+ }
126
+
127
+ template <typename TyGroup, typename TyElem, enable_tile_optimization<TyGroup> = nullptr>
128
+ _CG_STATIC_QUALIFIER void accelerated_async_copy(TyGroup &group, TyElem *__restrict__ dst,
129
+ const TyElem *__restrict__ src, size_t count) {
130
+ static_assert(async_copy_group_supported<TyGroup>::value,
131
+ "Async copy is only supported for groups that represent private shared memory");
132
+
133
+ if (count == 0) {
134
+ return;
135
+ }
136
+
137
+ const bool dstIsNotShared = !__isShared(dst);
138
+ const bool srcIsNotGlobal = !__isGlobal(src);
139
+
140
+ if (dstIsNotShared || srcIsNotGlobal) {
141
+ inline_copy(group, dst, src, count);
142
+ return;
143
+ }
144
+
145
+ const unsigned int stride = group.size();
146
+ const unsigned int rank = group.thread_rank();
147
+ // Efficient copies require warps to operate on the same amount of work at each step.
148
+ // Remainders are handled in a separate stage to prevent branching.
149
+ const unsigned int subWarpMask = (stride - 1);
150
+ const unsigned int subwarpCopies = (subWarpMask & (unsigned int)count);
151
+ const unsigned int maxSubwarpRank = min(rank, subwarpCopies - 1);
152
+
153
+ const size_t warpCopies = (count & (~subWarpMask));
154
+
155
+ for (size_t idx = 0; idx < warpCopies; idx += stride) {
156
+ size_t _srcIdx = rank + idx;
157
+ size_t _dstIdx = rank + idx;
158
+ __pipeline_memcpy_async(dst + _dstIdx, src + _srcIdx, sizeof(TyElem));
159
+ }
160
+
161
+ if (subwarpCopies) {
162
+ size_t _srcIdx = warpCopies + maxSubwarpRank;
163
+ size_t _dstIdx = warpCopies + maxSubwarpRank;
164
+ __pipeline_memcpy_async(dst + _dstIdx, src + _srcIdx, sizeof(TyElem));
165
+ }
166
+ }
167
+
168
+ template <typename TyGroup, typename TyElem, disable_tile_optimization<TyGroup> = nullptr>
169
+ _CG_STATIC_QUALIFIER void accelerated_async_copy(TyGroup &group, TyElem *__restrict__ dst,
170
+ const TyElem *__restrict__ src, size_t count) {
171
+ static_assert(async_copy_group_supported<TyGroup>::value,
172
+ "Async copy is only supported for groups that represent private shared memory");
173
+
174
+ const bool dstIsNotShared = !__isShared(dst);
175
+ const bool srcIsNotGlobal = !__isGlobal(src);
176
+
177
+ if (dstIsNotShared || srcIsNotGlobal) {
178
+ inline_copy(group, dst, src, count);
179
+ return;
180
+ }
181
+
182
+ unsigned int stride = group.size();
183
+ unsigned int rank = group.thread_rank();
184
+
185
+ for (size_t idx = rank; idx < count; idx += stride) {
186
+ size_t _srcIdx = idx;
187
+ size_t _dstIdx = idx;
188
+ __pipeline_memcpy_async(dst + _dstIdx, src + _srcIdx, sizeof(TyElem));
189
+ }
190
+ }
191
+
192
+ // Determine best possible alignment given an input and initial conditions
193
+ // Attempts to generate as little code as possible, most likely should only be used with 1 and 2 byte alignments
194
+ template <unsigned int MinAlignment, unsigned int MaxAlignment>
195
+ _CG_STATIC_QUALIFIER uint32_t find_best_alignment(void *__restrict__ dst, const void *__restrict__ src) {
196
+ // Narrowing conversion intentional
197
+ uint32_t base1 = (uint32_t) reinterpret_cast<uintptr_t>(src);
198
+ uint32_t base2 = (uint32_t) reinterpret_cast<uintptr_t>(dst);
199
+
200
+ uint32_t diff = ((base1) ^ (base2)) & (MaxAlignment - 1);
201
+
202
+ // range [MaxAlignment, alignof(elem)], step: x >> 1
203
+ // over the range of possible alignments, choose the best one available
204
+ uint32_t out = MaxAlignment;
205
+ #pragma unroll
206
+ for (uint32_t alignment = (MaxAlignment >> 1); alignment >= MinAlignment; alignment >>= 1) {
207
+ if (alignment & diff)
208
+ out = alignment;
209
+ }
210
+
211
+ return out;
212
+ }
213
+
214
+ // Copy a byte range as TyType-aligned segments: align the head with an inline copy,
215
+ // async-copy the aligned middle in TyType-sized chunks, then finish the tail inline
216
+ template <typename TyType, typename TyGroup>
217
+ _CG_STATIC_QUALIFIER void copy_like(const TyGroup &group, void *__restrict__ _dst, const void *__restrict__ _src,
218
+ size_t count) {
219
+ const char *src = reinterpret_cast<const char *>(_src);
220
+ char *dst = reinterpret_cast<char *>(_dst);
221
+
222
+ constexpr uint32_t targetAlignment = (uint32_t)alignof(TyType);
223
+
224
+ uint32_t base = (uint32_t) reinterpret_cast<uintptr_t>(src);
225
+ uint32_t alignOffset = ((~base) + 1) & (targetAlignment - 1);
226
+
227
+ inline_copy(group, dst, src, alignOffset);
228
+ count -= alignOffset;
229
+ src += alignOffset;
230
+ dst += alignOffset;
231
+
232
+ // Copy using the best available alignment, async_copy expects n-datums, not bytes
233
+ size_t asyncCount = count / sizeof(TyType);
234
+ accelerated_async_copy(group, reinterpret_cast<TyType *>(dst), reinterpret_cast<const TyType *>(src), asyncCount);
235
+ asyncCount *= sizeof(TyType);
236
+
237
+ count -= asyncCount;
238
+ src += asyncCount;
239
+ dst += asyncCount;
240
+ inline_copy(group, dst, src, count);
241
+ }
242
+
243
+ // We must determine alignment and manually align src/dst ourselves
244
+ template <size_t AlignHint>
245
+ struct _memcpy_async_align_dispatch {
246
+ template <typename TyGroup>
247
+ _CG_STATIC_QUALIFIER void copy(TyGroup &group, void *__restrict__ dst, const void *__restrict__ src, size_t count) {
248
+ uint32_t alignment = find_best_alignment<AlignHint, 16>(dst, src);
249
+
250
+ // Avoid copying the extra bytes if desired copy count is smaller
251
+ alignment = count < alignment ? AlignHint : alignment;
252
+
253
+ switch (alignment) {
254
+ default:
255
+ case 1:
256
+ inline_copy(group, reinterpret_cast<char *>(dst), reinterpret_cast<const char *>(src), count);
257
+ break;
258
+ case 2:
259
+ inline_copy(group, reinterpret_cast<short *>(dst), reinterpret_cast<const short *>(src), count >> 1);
260
+ break;
261
+ case 4:
262
+ copy_like<Segment<1>>(group, dst, src, count);
263
+ break;
264
+ case 8:
265
+ copy_like<Segment<2>>(group, dst, src, count);
266
+ break;
267
+ case 16:
268
+ copy_like<Segment<4>>(group, dst, src, count);
269
+ break;
270
+ }
271
+ }
272
+ };
273
+
274
+ // Specialization for 4 byte alignments
275
+ template <>
276
+ struct _memcpy_async_align_dispatch<4> {
277
+ template <typename TyGroup>
278
+ _CG_STATIC_QUALIFIER void copy(TyGroup &group, void *__restrict__ _dst, const void *__restrict__ _src,
279
+ size_t count) {
280
+ const Segment<1> *src = reinterpret_cast<const Segment<1> *>(_src);
281
+ Segment<1> *dst = reinterpret_cast<Segment<1> *>(_dst);
282
+
283
+ // Dispatch straight to aligned LDGSTS calls
284
+ accelerated_async_copy(group, dst, src, count / sizeof(*dst));
285
+ }
286
+ };
287
+
288
+ // Specialization for 8 byte alignments
289
+ template <>
290
+ struct _memcpy_async_align_dispatch<8> {
291
+ template <typename TyGroup>
292
+ _CG_STATIC_QUALIFIER void copy(TyGroup &group, void *__restrict__ _dst, const void *__restrict__ _src,
293
+ size_t count) {
294
+ const Segment<2> *src = reinterpret_cast<const Segment<2> *>(_src);
295
+ Segment<2> *dst = reinterpret_cast<Segment<2> *>(_dst);
296
+
297
+ // Dispatch straight to aligned LDGSTS calls
298
+ accelerated_async_copy(group, dst, src, count / sizeof(*dst));
299
+ }
300
+ };
301
+
302
+ // Alignments over 16 are truncated to 16 and bypass alignment
303
+ // This is the highest performing memcpy available
304
+ template <>
305
+ struct _memcpy_async_align_dispatch<16> {
306
+ template <typename TyGroup>
307
+ _CG_STATIC_QUALIFIER void copy(TyGroup &group, void *__restrict__ _dst, const void *__restrict__ _src,
308
+ size_t count) {
309
+ const Segment<4> *src = reinterpret_cast<const Segment<4> *>(_src);
310
+ Segment<4> *dst = reinterpret_cast<Segment<4> *>(_dst);
311
+
312
+ // Dispatch straight to aligned LDGSTS calls
313
+ accelerated_async_copy(group, dst, src, count / sizeof(*dst));
314
+ }
315
+ };
316
+
317
+ // byte-wide API
318
+ template <size_t Alignment, class TyGroup>
319
+ _CG_STATIC_QUALIFIER void _memcpy_async_dispatch_to_aligned_copy(const TyGroup &group, void *__restrict__ _dst,
320
+ const void *__restrict__ _src, size_t count) {
321
+ static_assert(!(Alignment & (Alignment - 1)), "Known static alignment dispatch must be a power of 2");
322
+ details::_memcpy_async_align_dispatch<Alignment>::copy(group, _dst, _src, count);
323
+ }
324
+
325
+ // Internal dispatch APIs
326
+ // These deduce the alignments and sizes necessary to invoke the underlying copy engine
327
+ template <typename Ty>
328
+ using is_void = _CG_STL_NAMESPACE::is_same<Ty, void>;
329
+
330
+ template <typename Ty>
331
+ using enable_if_not_void = typename _CG_STL_NAMESPACE::enable_if<!is_void<Ty>::value, void *>::type;
332
+
333
+ template <typename Ty>
334
+ using enable_if_void = typename _CG_STL_NAMESPACE::enable_if<is_void<Ty>::value, void *>::type;
335
+
336
+ template <typename Ty>
337
+ using enable_if_integral =
338
+ typename _CG_STL_NAMESPACE::enable_if<_CG_STL_NAMESPACE::is_integral<Ty>::value, void *>::type;
339
+
340
+ // byte-wide API using aligned_size_t
341
+ template <class TyGroup, template <size_t> typename Alignment, size_t Hint>
342
+ _CG_STATIC_QUALIFIER void _memcpy_async_bytes(const TyGroup &group, void *__restrict__ _dst,
343
+ const void *__restrict__ _src, const Alignment<Hint> &count) {
344
+ constexpr size_t _align = (Hint > 16) ? 16 : Hint;
345
+
346
+ details::_memcpy_async_dispatch_to_aligned_copy<_align>(group, _dst, _src, (size_t)count);
347
+ }
348
+
349
+ // byte-wide API using type for alignment
350
+ template <class TyGroup, typename TyElem, typename TySize, size_t Hint = alignof(TyElem),
351
+ enable_if_not_void<TyElem> = nullptr, enable_if_integral<TySize> = nullptr>
352
+ _CG_STATIC_QUALIFIER void _memcpy_async_bytes(const TyGroup &group, TyElem *__restrict__ _dst,
353
+ const TyElem *__restrict__ _src, const TySize& count) {
354
+ constexpr size_t _align = (Hint > 16) ? 16 : Hint;
355
+
356
+ details::_memcpy_async_dispatch_to_aligned_copy<_align>(group, _dst, _src, count);
357
+ }
358
+
359
+ // byte-wide API with full alignment deduction required
360
+ template <class TyGroup, typename TyElem, typename TySize, enable_if_void<TyElem> = nullptr,
361
+ enable_if_integral<TySize> = nullptr>
362
+ _CG_STATIC_QUALIFIER void _memcpy_async_bytes(const TyGroup &group, TyElem *__restrict__ _dst,
363
+ const TyElem *__restrict__ _src, const TySize& count) {
364
+ details::_memcpy_async_dispatch_to_aligned_copy<1>(group, _dst, _src, count);
365
+ }
366
+
367
+ // 1d-datum API
368
+ template <class TyGroup, typename TyElem, size_t Hint = alignof(TyElem)>
369
+ _CG_STATIC_QUALIFIER void _memcpy_async_datum(const TyGroup &group, TyElem *__restrict__ dst, const size_t dstCount,
370
+ const TyElem *__restrict__ src, const size_t srcCount) {
371
+ constexpr unsigned int _align = Hint;
372
+ const size_t totalCount = min(dstCount, srcCount) * sizeof(TyElem);
373
+
374
+ details::_memcpy_async_dispatch_to_aligned_copy<_align>(group, dst, src, totalCount);
375
+ }
376
+
377
+ // 1d-datum API using aligned_size_t
378
+ template <class TyGroup, typename TyElem, template <size_t> typename Alignment, size_t Hint>
379
+ _CG_STATIC_QUALIFIER void _memcpy_async_datum(const TyGroup &group, TyElem *__restrict__ dst, const Alignment<Hint> &dstCount,
380
+ const TyElem *__restrict__ src, const Alignment<Hint> &srcCount) {
381
+ constexpr unsigned int _align = Hint;
382
+ const size_t totalCount = min((size_t)dstCount, (size_t)srcCount) * sizeof(TyElem);
383
+
384
+ details::_memcpy_async_dispatch_to_aligned_copy<_align>(group, dst, src, totalCount);
385
+ }
386
+
387
+ } // namespace details
388
+
389
+ /*
390
+ * Group submit batch of async-copy to cover a contiguous 1D array
391
+ * and commit that batch to eventually wait for completion.
392
+ */
393
+ template <class TyGroup, typename TyElem, typename TySizeT>
394
+ _CG_STATIC_QUALIFIER void memcpy_async(const TyGroup &group, TyElem *__restrict__ _dst, const TyElem *__restrict__ _src,
395
+ const TySizeT &count) {
396
+ details::_memcpy_async_bytes(group, _dst, _src, count);
397
+ __pipeline_commit();
398
+ }
399
+
400
+ /*
401
+ * Group submit batch of async-copy to cover a contiguous 1D array
402
+ * and commit that batch to eventually wait for completion.
403
+ * Object counts are in datum sized chunks, not bytes.
404
+ */
405
+ template <class TyGroup, class TyElem, typename DstLayout, typename SrcLayout>
406
+ _CG_STATIC_QUALIFIER void memcpy_async(const TyGroup &group, TyElem *__restrict__ dst, const DstLayout &dstLayout,
407
+ const TyElem *__restrict__ src, const SrcLayout &srcLayout) {
408
+ details::_memcpy_async_datum(group, dst, dstLayout, src, srcLayout);
409
+ __pipeline_commit();
410
+ }
411
+
412
+ /* Group wait for prior Nth stage of memcpy_async to complete. */
413
+ template <unsigned int Stage, class TyGroup>
414
+ _CG_STATIC_QUALIFIER void wait_prior(const TyGroup &group) {
415
+ __pipeline_wait_prior(Stage);
416
+ group.sync();
417
+ }
418
+
419
+ /* Group wait for all previously submitted memcpy_async to complete. */
420
+ template <class TyGroup>
421
+ _CG_STATIC_QUALIFIER void wait(const TyGroup &group) {
422
+ __pipeline_wait_prior(0);
423
+ group.sync();
424
+ }
425
+
426
+ /***************** CG APIs including pipeline are deprecated *****************/
427
+
428
+ /* Group submit batch of async-copy to cover a contiguous 1D array
429
+ to a pipeline and commit the batch. */
430
+ template <class TyGroup, class TyElem>
431
+ _CG_DEPRECATED _CG_STATIC_QUALIFIER void memcpy_async(TyGroup &group, TyElem *dst, size_t dstCount, const TyElem *src, size_t srcCount,
432
+ nvcuda::experimental::pipeline &pipe) {
433
+ details::_memcpy_async_datum(group, dst, dstCount, src, srcCount);
434
+ pipe.commit();
435
+ }
436
+
437
+ /* Group wait for prior Nth stage of memcpy_async to complete. */
438
+ template <unsigned int Stage, class TyGroup>
439
+ _CG_DEPRECATED _CG_STATIC_QUALIFIER void wait_prior(TyGroup &group, nvcuda::experimental::pipeline &pipe) {
440
+ pipe.wait_prior<Stage>();
441
+ group.sync();
442
+ }
443
+
444
+ /* Group wait for stage-S of memcpy_async to complete. */
445
+ template <class TyGroup>
446
+ _CG_DEPRECATED _CG_STATIC_QUALIFIER void wait(TyGroup &group, nvcuda::experimental::pipeline &pipe, size_t stage) {
447
+ pipe.wait(stage);
448
+ group.sync();
449
+ }
450
+ _CG_END_NAMESPACE
451
+
452
+ #endif // _CG_ASYNC_H
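A minimal kernel sketch using the group-wide memcpy_async/wait defined above to stage global memory through shared memory (assumes a CUDA 11+ toolchain and a 256-thread block; names are illustrative):

#include <cooperative_groups.h>
#include <cooperative_groups/memcpy_async.h>
namespace cg = cooperative_groups;

__global__ void scale(const float *in, float *out, float s) {
    __shared__ float stage[256];
    cg::thread_block block = cg::this_thread_block();

    // All threads of the block issue the copy cooperatively; on devices with
    // LDGSTS the per-element work goes through accelerated_async_copy above.
    cg::memcpy_async(block, stage, in + blockIdx.x * 256, sizeof(float) * 256);
    cg::wait(block);  // __pipeline_wait_prior(0) followed by block.sync()

    out[blockIdx.x * 256 + threadIdx.x] = stage[threadIdx.x] * s;
}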
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_reduce.h ADDED
@@ -0,0 +1,108 @@
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_COALESCED_REDUCE_H_
50
+ #define _CG_COALESCED_REDUCE_H_
51
+
52
+ #include "info.h"
53
+ #include "helpers.h"
54
+ #include "cooperative_groups.h"
55
+ #include "partitioning.h"
56
+ #include "coalesced_scan.h"
57
+
58
+ _CG_BEGIN_NAMESPACE
59
+
60
+ namespace details {
61
+
62
+ template <typename TyVal, typename TyOp>
63
+ _CG_QUALIFIER auto coalesced_reduce_to_one(const coalesced_group& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
64
+ if (group.size() == 32) {
65
+ auto out = val;
66
+ for (int offset = group.size() >> 1; offset > 0; offset >>= 1) {
67
+ out = op(out, group.shfl_up(out, offset));
68
+ }
69
+ return out;
70
+ }
71
+ else {
72
+ auto scan_result =
73
+ inclusive_scan_non_contiguous(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
74
+ return scan_result;
75
+ }
76
+ }
77
+
78
+ template <typename TyVal, typename TyOp>
79
+ _CG_QUALIFIER auto coalesced_reduce(const coalesced_group& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
80
+ auto out = coalesced_reduce_to_one(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
81
+ if (group.size() == 32) {
82
+ return group.shfl(out, 31);
83
+ }
84
+ else {
85
+ unsigned int group_mask = _coalesced_group_data_access::get_mask(group);
86
+ unsigned int last_thread_id = 31 - __clz(group_mask);
87
+ return details::tile::shuffle_dispatch<TyVal>::shfl(
88
+ _CG_STL_NAMESPACE::forward<TyVal>(out), group_mask, last_thread_id, 32);
89
+ }
90
+ }
91
+
92
+ template <typename TyVal, typename TyOp, unsigned int TySize, typename ParentT>
93
+ _CG_QUALIFIER auto coalesced_reduce(const __single_warp_thread_block_tile<TySize, ParentT>& group,
94
+ TyVal&& val,
95
+ TyOp&& op) -> decltype(op(val, val)) {
96
+ auto out = val;
97
+ for (int mask = TySize >> 1; mask > 0; mask >>= 1) {
98
+ out = op(out, group.shfl_xor(out, mask));
99
+ }
100
+
101
+ return out;
102
+ }
103
+
104
+ } // details
105
+
106
+ _CG_END_NAMESPACE
107
+
108
+ #endif // _CG_COALESCED_REDUCE_H_
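These details:: helpers back the public cooperative_groups::reduce entry point from <cooperative_groups/reduce.h>; a minimal sketch of a reduction over the currently active (coalesced) threads, with illustrative buffer names:

#include <cooperative_groups.h>
#include <cooperative_groups/reduce.h>
namespace cg = cooperative_groups;

__global__ void active_sum(const int *in, int *out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n && in[i] > 0) {                 // divergence -> possibly partial warp
        cg::coalesced_group active = cg::coalesced_threads();
        // A full 32-thread group takes the shuffle loop above; partial groups
        // fall back to the non-contiguous inclusive scan.
        int total = cg::reduce(active, in[i], cg::plus<int>());
        if (active.thread_rank() == 0)
            atomicAdd(out, total);
    }
}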
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/coalesced_scan.h ADDED
@@ -0,0 +1,174 @@
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_COALESCED_SCAN_H_
50
+ #define _CG_COALESCED_SCAN_H_
51
+
52
+ #include "info.h"
53
+ #include "helpers.h"
54
+ #include "cooperative_groups.h"
55
+ #include "partitioning.h"
56
+ #include "functional.h"
57
+
58
+ _CG_BEGIN_NAMESPACE
59
+
60
+ namespace details {
61
+
62
+ template <typename TyGroup, typename TyVal, typename TyOp>
63
+ _CG_QUALIFIER auto inclusive_scan_contiguous(const TyGroup& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
64
+ auto out = val;
65
+ for (int mask = 1; mask < group.size(); mask <<= 1) {
66
+ auto tmp = group.shfl_up(out, mask);
67
+ if (mask <= group.thread_rank()) {
68
+ out = op(out, tmp);
69
+ }
70
+ }
71
+
72
+ return out;
73
+ }
74
+
75
+ template <typename TyGroup, typename TyVal, typename TyOp>
76
+ _CG_QUALIFIER auto inclusive_scan_non_contiguous(const TyGroup& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
77
+ const unsigned int groupSize = group.size();
78
+ auto out = val;
79
+
80
+ const unsigned int mask = details::_coalesced_group_data_access::get_mask(group);
81
+ unsigned int lanemask = details::lanemask32_lt() & mask;
82
+ unsigned int srcLane = details::laneid();
83
+
84
+ const unsigned int base = __ffs(mask)-1; /* lane with rank == 0 */
85
+ const unsigned int rank = __popc(lanemask);
86
+
87
+ for (unsigned int i = 1, j = 1; i < groupSize; i <<= 1) {
88
+ if (i <= rank) {
89
+ srcLane -= j;
90
+ j = i; /* maximum possible lane */
91
+
92
+ unsigned int begLane = base + rank - i; /* minimum possible lane */
93
+
94
+ /* Next source lane is in the range [ begLane .. srcLane ]
95
+ * If begLane < srcLane then do a binary search.
96
+ */
97
+ while (begLane < srcLane) {
98
+ const unsigned int halfLane = (begLane + srcLane) >> 1;
99
+ const unsigned int halfMask = lanemask >> halfLane;
100
+ const unsigned int d = __popc(halfMask);
101
+ if (d < i) {
102
+ srcLane = halfLane - 1; /* halfLane too large */
103
+ }
104
+ else if ((i < d) || !(halfMask & 0x01)) {
105
+ begLane = halfLane + 1; /* halfLane too small */
106
+ }
107
+ else {
108
+ begLane = srcLane = halfLane; /* happen to hit */
109
+ }
110
+ }
111
+ }
112
+
113
+ auto tmp = details::tile::shuffle_dispatch<TyVal>::shfl(out, mask, srcLane, 32);
114
+ if (i <= rank) {
115
+ out = op(out, tmp);
116
+ }
117
+ }
118
+ return out;
119
+ }
120
+
121
+ template <unsigned int TySize, typename ParentT, typename TyVal, typename TyOp>
122
+ _CG_QUALIFIER auto coalesced_inclusive_scan(const __single_warp_thread_block_tile<TySize, ParentT>& group,
123
+ TyVal&& val,
124
+ TyOp&& op) -> decltype(op(val, val)) {
125
+ return inclusive_scan_contiguous(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
126
+ }
127
+
128
+ template <typename TyVal, typename TyOp>
129
+ _CG_QUALIFIER auto coalesced_inclusive_scan(const coalesced_group& group, TyVal&& val, TyOp&& op) -> decltype(op(val, val)) {
130
+ if (group.size() == 32) {
131
+ return inclusive_scan_contiguous(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
132
+ }
133
+ else {
134
+ return inclusive_scan_non_contiguous(group, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
135
+ }
136
+ }
137
+
138
+ template <bool IntegralOptimized>
139
+ struct scan_choose_convertion;
140
+
141
+ template<>
142
+ struct scan_choose_convertion<true> {
143
+ template <typename TyGroup, typename TyRes, typename TyVal>
144
+ _CG_STATIC_QUALIFIER details::remove_qual<TyVal> convert_inclusive_to_exclusive(const TyGroup& group, TyRes& result, TyVal&& val) {
145
+ return result - val;
146
+ }
147
+ };
148
+
149
+ template<>
150
+ struct scan_choose_convertion<false> {
151
+ template <typename TyGroup, typename TyRes, typename TyVal>
152
+ _CG_STATIC_QUALIFIER details::remove_qual<TyVal> convert_inclusive_to_exclusive(const TyGroup& group, TyRes& result, TyVal&& val) {
153
+ auto ret = group.shfl_up(result, 1);
154
+ if (group.thread_rank() == 0) {
155
+ return {};
156
+ }
157
+ else {
158
+ return ret;
159
+ }
160
+ }
161
+ };
162
+
163
+ template <typename TyGroup, typename TyRes, typename TyVal, typename TyFn>
164
+ _CG_QUALIFIER auto convert_inclusive_to_exclusive(const TyGroup& group, TyRes& result, TyVal&& val, TyFn&& op) -> decltype(op(val, val)) {
165
+ using conversion = scan_choose_convertion<_CG_STL_NAMESPACE::is_same<remove_qual<TyFn>, cooperative_groups::plus<remove_qual<TyVal>>>::value
166
+ && _CG_STL_NAMESPACE::is_integral<remove_qual<TyVal>>::value>;
167
+ return conversion::convert_inclusive_to_exclusive(group, result, _CG_STL_NAMESPACE::forward<TyVal>(val));
168
+ }
169
+
170
+ } // details
171
+
172
+ _CG_END_NAMESPACE
173
+
174
+ #endif // _CG_COALESCED_SCAN_H_
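The scan helpers above back the public cooperative_groups::inclusive_scan / exclusive_scan wrappers from <cooperative_groups/scan.h>; a minimal warp-tile sketch with illustrative names:

#include <cooperative_groups.h>
#include <cooperative_groups/scan.h>
namespace cg = cooperative_groups;

__global__ void tile_offsets(const int *counts, int *offsets) {
    cg::thread_block_tile<32> warp = cg::tiled_partition<32>(cg::this_thread_block());
    int i = blockIdx.x * blockDim.x + threadIdx.x;

    // Exclusive prefix sum within the warp: the contiguous shfl_up loop
    // (inclusive_scan_contiguous) plus the integral plus<> shortcut (result - val).
    offsets[i] = cg::exclusive_scan(warp, counts[i], cg::plus<int>());
}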
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/driver_abi.h ADDED
@@ -0,0 +1,99 @@
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_DRIVER_API_H
50
+ #define _CG_DRIVER_API_H
51
+
52
+ #include "info.h"
53
+
54
+ _CG_BEGIN_NAMESPACE
55
+
56
+ namespace details {
57
+ template <unsigned int RegId>
58
+ _CG_QUALIFIER unsigned int load_env_reg() {
59
+ // Abort by default
60
+ _CG_ABORT();
61
+ return 0;
62
+ }
63
+
64
+ template <unsigned int HiReg, unsigned int LoReg>
65
+ _CG_QUALIFIER unsigned long long load_env_reg64() {
66
+ unsigned long long registerLo = load_env_reg<LoReg>();
67
+ unsigned long long registerHi = load_env_reg<HiReg>();
68
+
69
+ return (registerHi << 32) | registerLo;
70
+ }
71
+
72
+ // inline PTX for accessing registers requires an immediate for the special reg
73
+ # define LOAD_ENVREG(NUMBER) \
74
+ template <> _CG_QUALIFIER unsigned int load_env_reg<NUMBER>() { \
75
+ unsigned int r; \
76
+ asm ("mov.u32 %0, %%envreg" #NUMBER ";" : "=r"(r)); \
77
+ return r; \
78
+ }
79
+
80
+ // Instantiate loaders for registers used
81
+ LOAD_ENVREG(0);
82
+ LOAD_ENVREG(1);
83
+ LOAD_ENVREG(2);
84
+ # undef LOAD_ENVREG
85
+
86
+ struct grid_workspace {
87
+ unsigned int wsSize;
88
+ unsigned int barrier;
89
+ };
90
+
91
+ _CG_QUALIFIER grid_workspace* get_grid_workspace() {
92
+ unsigned long long gridWsAbiAddress = load_env_reg64<1, 2>();
93
+ // Interpret the address from envreg 1 and 2 as the driver's grid workspace
94
+ return (reinterpret_cast<grid_workspace*>(gridWsAbiAddress));
95
+ }
96
+ }
97
+ _CG_END_NAMESPACE
98
+
99
+ #endif // _CG_DRIVER_API_H
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/functional.h ADDED
@@ -0,0 +1,212 @@
1
+ /* Copyright 1993-2016 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _CG_FUNCTIONAL_H
50
+ #define _CG_FUNCTIONAL_H
51
+
52
+ #include "info.h"
53
+ #include "helpers.h"
54
+
55
+ #ifdef _CG_CPP11_FEATURES
56
+ #ifdef _CG_USE_CUDA_STL
57
+ # include <cuda/std/functional>
58
+ #endif
59
+
60
+ _CG_BEGIN_NAMESPACE
61
+
62
+ namespace details {
63
+ #ifdef _CG_USE_CUDA_STL
64
+ using cuda::std::plus;
65
+ using cuda::std::bit_and;
66
+ using cuda::std::bit_xor;
67
+ using cuda::std::bit_or;
68
+ #else
69
+ template <typename Ty> struct plus {__device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {return arg1 + arg2;}};
70
+ template <typename Ty> struct bit_and {__device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {return arg1 & arg2;}};
71
+ template <typename Ty> struct bit_xor {__device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {return arg1 ^ arg2;}};
72
+ template <typename Ty> struct bit_or {__device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {return arg1 | arg2;}};
73
+ #endif // _CG_USE_CUDA_STL
74
+ } // details
75
+
76
+ template <typename Ty>
77
+ struct plus : public details::plus<Ty> {};
78
+
79
+ template <typename Ty>
80
+ struct less {
81
+ __device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {
82
+ return (arg2 < arg1) ? arg2 : arg1;
83
+ }
84
+ };
85
+
86
+ template <typename Ty>
87
+ struct greater {
88
+ __device__ __forceinline__ Ty operator()(Ty arg1, Ty arg2) const {
89
+ return (arg1 < arg2) ? arg2 : arg1;
90
+ }
91
+ };
92
+
93
+ template <typename Ty>
94
+ struct bit_and : public details::bit_and<Ty> {};
95
+
96
+ template <typename Ty>
97
+ struct bit_xor : public details::bit_xor<Ty> {};
98
+
99
+ template <typename Ty>
100
+ struct bit_or : public details::bit_or<Ty> {};
101
+
102
+ #if defined(_CG_HAS_STL_ATOMICS)
103
+ namespace details {
104
+ template <class Ty>
105
+ using _atomic_is_type_supported = _CG_STL_NAMESPACE::integral_constant<bool,
106
+ _CG_STL_NAMESPACE::is_integral<Ty>::value && (sizeof(Ty) == 4 || sizeof(Ty) == 8)>;
107
+
108
+ template <typename TyOp> struct _atomic_op_supported : public _CG_STL_NAMESPACE::false_type {};
109
+ template <typename Ty> struct _atomic_op_supported<cooperative_groups::plus<Ty>> : public _atomic_is_type_supported<Ty> {};
110
+ template <typename Ty> struct _atomic_op_supported<cooperative_groups::less<Ty>> : public _atomic_is_type_supported<Ty> {};
111
+ template <typename Ty> struct _atomic_op_supported<cooperative_groups::greater<Ty>> : public _atomic_is_type_supported<Ty> {};
112
+ template <typename Ty> struct _atomic_op_supported<cooperative_groups::bit_and<Ty>> : public _atomic_is_type_supported<Ty> {};
113
+ template <typename Ty> struct _atomic_op_supported<cooperative_groups::bit_or<Ty>> : public _atomic_is_type_supported<Ty> {};
114
+ template <typename Ty> struct _atomic_op_supported<cooperative_groups::bit_xor<Ty>> : public _atomic_is_type_supported<Ty> {};
115
+
116
+ template<typename TyAtomic, typename TyVal, typename TyOp>
117
+ _CG_QUALIFIER remove_qual<TyVal> atomic_cas_fallback(TyAtomic&& atomic, TyVal&& val, TyOp&& op) {
118
+ auto old = atomic.load(cuda::std::memory_order_relaxed);
119
+ while(!atomic.compare_exchange_weak(old, op(old, val), cuda::std::memory_order_relaxed));
120
+ return old;
121
+ }
122
+
123
+ template<typename TyOp>
124
+ struct op_picker;
125
+
126
+ template<typename TyVal>
127
+ struct op_picker<cooperative_groups::plus<TyVal>> {
128
+ template<typename TyAtomic>
129
+ _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) {
130
+ return atomic.fetch_add(val, cuda::std::memory_order_relaxed);
131
+ }
132
+ };
133
+
134
+ template<typename TyVal>
135
+ struct op_picker<cooperative_groups::less<TyVal>> {
136
+ template<typename TyAtomic>
137
+ _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) {
138
+ return atomic.fetch_min(val, cuda::std::memory_order_relaxed);
139
+ }
140
+ };
141
+
142
+ template<typename TyVal>
143
+ struct op_picker<cooperative_groups::greater<TyVal>> {
144
+ template<typename TyAtomic>
145
+ _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) {
146
+ return atomic.fetch_max(val, cuda::std::memory_order_relaxed);
147
+ }
148
+ };
149
+
150
+ template<typename TyVal>
151
+ struct op_picker<cooperative_groups::bit_and<TyVal>> {
152
+ template<typename TyAtomic>
153
+ _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) {
154
+ return atomic.fetch_and(val, cuda::std::memory_order_relaxed);
155
+ }
156
+ };
157
+
158
+ template<typename TyVal>
159
+ struct op_picker<cooperative_groups::bit_xor<TyVal>> {
160
+ template<typename TyAtomic>
161
+ _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) {
162
+ return atomic.fetch_xor(val, cuda::std::memory_order_relaxed);
163
+ }
164
+ };
165
+
166
+ template<typename TyVal>
167
+ struct op_picker<cooperative_groups::bit_or<TyVal>> {
168
+ template<typename TyAtomic>
169
+ _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val) {
170
+ return atomic.fetch_or(val, cuda::std::memory_order_relaxed);
171
+ }
172
+ };
173
+
174
+ template<bool atomic_supported>
175
+ struct atomic_update_dispatch {};
176
+
177
+ template<>
178
+ struct atomic_update_dispatch<false> {
179
+ template<typename TyAtomic, typename TyVal, typename TyOp>
180
+ _CG_STATIC_QUALIFIER remove_qual<TyVal> atomic_update(TyAtomic& atomic, TyVal&& val, TyOp&& op) {
181
+ return atomic_cas_fallback(atomic, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
182
+ }
183
+ };
184
+
185
+ template<>
186
+ struct atomic_update_dispatch<true> {
187
+ template<typename TyAtomic, typename TyVal, typename TyOp>
188
+ _CG_STATIC_QUALIFIER TyVal atomic_update(TyAtomic& atomic, TyVal val, TyOp&& op) {
189
+ using dispatch = op_picker<details::remove_qual<TyOp>>;
190
+
191
+ return dispatch::atomic_update(atomic, val);
192
+ }
193
+ };
194
+
195
+ template<typename TyAtomic, typename TyVal, typename TyOp>
196
+ _CG_QUALIFIER remove_qual<TyVal> atomic_update(TyAtomic& atomic, TyVal&& val, TyOp&& op) {
197
+ using dispatch = atomic_update_dispatch<_atomic_op_supported<details::remove_qual<TyOp>>::value>;
198
+
199
+ return dispatch::atomic_update(atomic, _CG_STL_NAMESPACE::forward<TyVal>(val), _CG_STL_NAMESPACE::forward<TyOp>(op));
200
+ }
201
+
202
+ template<typename TyAtomic, typename TyVal>
203
+ _CG_QUALIFIER void atomic_store(TyAtomic& atomic, TyVal&& val) {
204
+ atomic.store(val, cuda::std::memory_order_relaxed);
205
+ }
206
+ }
207
+ #endif
208
+
209
+ _CG_END_NAMESPACE
210
+
211
+ #endif
212
+ #endif //_CG_FUNCTIONAL_H
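The functional.h machinery above routes each supported (operator, integral type) pair to the matching fetch_* member of the atomic, and every other combination through atomic_cas_fallback's relaxed compare-exchange loop. Below is a minimal standalone sketch of that fallback pattern, assuming libcu++'s cuda::atomic and a hypothetical multiplies_op (an operator with no fetch_* counterpart); it is illustrative only and not part of the header.

// Hedged sketch: the same relaxed compare-exchange retry loop that
// atomic_cas_fallback uses, applied to an operator without a fetch_* member.
// multiplies_op and scale_kernel are hypothetical names.
#include <cuda/atomic>

struct multiplies_op {
    __device__ int operator()(int a, int b) const { return a * b; }
};

__global__ void scale_kernel(cuda::atomic<int, cuda::thread_scope_device>* acc, int factor)
{
    multiplies_op op;
    int old = acc->load(cuda::std::memory_order_relaxed);
    // Retry until no other thread changed *acc between the load and the exchange;
    // compare_exchange_weak refreshes 'old' on failure.
    while (!acc->compare_exchange_weak(old, op(old, factor),
                                       cuda::std::memory_order_relaxed)) { }
}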
venv/lib/python3.10/site-packages/nvidia/cuda_runtime/include/cooperative_groups/details/helpers.h ADDED
@@ -0,0 +1,634 @@
1
+ /* Copyright 1993-2021 NVIDIA Corporation. All rights reserved.
2
+ *
3
+ * NOTICE TO LICENSEE:
4
+ *
5
+ * The source code and/or documentation ("Licensed Deliverables") are
6
+ * subject to NVIDIA intellectual property rights under U.S. and
7
+ * international Copyright laws.
8
+ *
9
+ * The Licensed Deliverables contained herein are PROPRIETARY and
10
+ * CONFIDENTIAL to NVIDIA and are being provided under the terms and
11
+ * conditions of a form of NVIDIA software license agreement by and
12
+ * between NVIDIA and Licensee ("License Agreement") or electronically
13
+ * accepted by Licensee. Notwithstanding any terms or conditions to
14
+ * the contrary in the License Agreement, reproduction or disclosure
15
+ * of the Licensed Deliverables to any third party without the express
16
+ * written consent of NVIDIA is prohibited.
17
+ *
18
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
19
+ * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
20
+ * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. THEY ARE
21
+ * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
22
+ * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
23
+ * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
24
+ * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
25
+ * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
26
+ * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
27
+ * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
28
+ * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
29
+ * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
30
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
31
+ * OF THESE LICENSED DELIVERABLES.
32
+ *
33
+ * U.S. Government End Users. These Licensed Deliverables are a
34
+ * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
35
+ * 1995), consisting of "commercial computer software" and "commercial
36
+ * computer software documentation" as such terms are used in 48
37
+ * C.F.R. 12.212 (SEPT 1995) and are provided to the U.S. Government
38
+ * only as a commercial end item. Consistent with 48 C.F.R.12.212 and
39
+ * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
40
+ * U.S. Government End Users acquire the Licensed Deliverables with
41
+ * only those rights set forth herein.
42
+ *
43
+ * Any use of the Licensed Deliverables in individual and commercial
44
+ * software must include, in the user documentation and internal
45
+ * comments to the code, the above Disclaimer and U.S. Government End
46
+ * Users Notice.
47
+ */
48
+
49
+ #ifndef _COOPERATIVE_GROUPS_HELPERS_H_
50
+ # define _COOPERATIVE_GROUPS_HELPERS_H_
51
+
52
+ #include "info.h"
53
+ #include "sync.h"
54
+
55
+ _CG_BEGIN_NAMESPACE
56
+
57
+ namespace details {
58
+ #ifdef _CG_CPP11_FEATURES
59
+ template <typename Ty> struct _is_float_or_half : public _CG_STL_NAMESPACE::is_floating_point<Ty> {};
60
+ # ifdef _CG_HAS_FP16_COLLECTIVE
61
+ template <> struct _is_float_or_half<__half> : public _CG_STL_NAMESPACE::true_type {};
62
+ template <> struct _is_float_or_half<__half2> : public _CG_STL_NAMESPACE::true_type {};
63
+ # endif
64
+ template <typename Ty>
65
+ using is_float_or_half = _is_float_or_half<typename _CG_STL_NAMESPACE::remove_cv<Ty>::type>;
66
+
67
+ // Non-STL utility templates
68
+ template <typename Ty>
69
+ using remove_qual = typename _CG_STL_NAMESPACE::remove_cv<typename _CG_STL_NAMESPACE::remove_reference<Ty>::type>::type;
70
+
71
+ template <typename TyLhs, typename TyRhs>
72
+ using is_op_type_same = _CG_STL_NAMESPACE::is_same<remove_qual<TyLhs>, remove_qual<TyRhs>
73
+ >;
74
+ #endif
75
+
76
+ template <typename TyTrunc>
77
+ _CG_STATIC_QUALIFIER TyTrunc vec3_to_linear(dim3 index, dim3 nIndex) {
78
+ return ((TyTrunc)index.z * nIndex.y * nIndex.x) +
79
+ ((TyTrunc)index.y * nIndex.x) +
80
+ (TyTrunc)index.x;
81
+ }
82
+
83
+ namespace cta {
84
+
85
+ _CG_STATIC_QUALIFIER void sync()
86
+ {
87
+ __barrier_sync(0);
88
+ }
89
+
90
+ _CG_STATIC_QUALIFIER unsigned int num_threads()
91
+ {
92
+ return static_cast<unsigned int>(blockDim.x * blockDim.y * blockDim.z);
93
+ }
94
+
95
+ _CG_STATIC_QUALIFIER unsigned int thread_rank()
96
+ {
97
+ return vec3_to_linear<unsigned int>(threadIdx, blockDim);
98
+ }
99
+
100
+ _CG_STATIC_QUALIFIER dim3 group_index()
101
+ {
102
+ return dim3(blockIdx.x, blockIdx.y, blockIdx.z);
103
+ }
104
+
105
+ _CG_STATIC_QUALIFIER dim3 thread_index()
106
+ {
107
+ return dim3(threadIdx.x, threadIdx.y, threadIdx.z);
108
+ }
109
+
110
+ _CG_STATIC_QUALIFIER dim3 dim_threads()
111
+ {
112
+ return dim3(blockDim.x, blockDim.y, blockDim.z);
113
+ }
114
+
115
+ // Legacy aliases
116
+ _CG_STATIC_QUALIFIER unsigned int size()
117
+ {
118
+ return num_threads();
119
+ }
120
+
121
+ _CG_STATIC_QUALIFIER dim3 block_dim()
122
+ {
123
+ return dim_threads();
124
+ }
125
+
126
+ };
127
+
128
+ class _coalesced_group_data_access {
129
+ public:
130
+ // Retrieve mask of coalesced groups and tiles
131
+ template <typename TyGroup>
132
+ _CG_STATIC_QUALIFIER unsigned int get_mask(const TyGroup &group) {
133
+ return group.get_mask();
134
+ }
135
+
136
+ template <typename TyGroup>
137
+ _CG_STATIC_QUALIFIER TyGroup construct_from_mask(unsigned int mask) {
138
+ return TyGroup(mask);
139
+ }
140
+
141
+ template <typename TyGroup>
142
+ _CG_STATIC_QUALIFIER void modify_meta_group(TyGroup &group, unsigned int mgRank, unsigned int mgSize) {
143
+ group._data.coalesced.metaGroupRank = mgRank;
144
+ group._data.coalesced.metaGroupSize = mgSize;
145
+ }
146
+ };
147
+
148
+ namespace tile {
149
+ template <unsigned int TileCount, unsigned int TileMask, unsigned int LaneMask, unsigned int ShiftCount>
150
+ struct _tile_helpers{
151
+ _CG_STATIC_CONST_DECL unsigned int tileCount = TileCount;
152
+ _CG_STATIC_CONST_DECL unsigned int tileMask = TileMask;
153
+ _CG_STATIC_CONST_DECL unsigned int laneMask = LaneMask;
154
+ _CG_STATIC_CONST_DECL unsigned int shiftCount = ShiftCount;
155
+ };
156
+
157
+ template <unsigned int> struct tile_helpers;
158
+ template <> struct tile_helpers<32> : public _tile_helpers<1, 0xFFFFFFFF, 0x1F, 5> {};
159
+ template <> struct tile_helpers<16> : public _tile_helpers<2, 0x0000FFFF, 0x0F, 4> {};
160
+ template <> struct tile_helpers<8> : public _tile_helpers<4, 0x000000FF, 0x07, 3> {};
161
+ template <> struct tile_helpers<4> : public _tile_helpers<8, 0x0000000F, 0x03, 2> {};
162
+ template <> struct tile_helpers<2> : public _tile_helpers<16, 0x00000003, 0x01, 1> {};
163
+ template <> struct tile_helpers<1> : public _tile_helpers<32, 0x00000001, 0x00, 0> {};
164
+
165
+ #ifdef _CG_CPP11_FEATURES
166
+ namespace shfl {
167
+ /***********************************************************************************
168
+ * Recursively Sliced Shuffle
169
+ * Purpose:
170
+ * Slices an input type a number of times into integral types so that shuffles
171
+ * are well defined
172
+ * Expectations:
173
+ * This object *should not* be used from a reinterpret_cast pointer unless
174
+ * some alignment guarantees can be met. Use a memcpy to guarantee that loads
175
+ * from the integral types stored within are aligned and correct.
176
+ **********************************************************************************/
177
+ template <unsigned int count, bool intSized = (count <= sizeof(int))>
178
+ struct recursive_sliced_shuffle_helper;
179
+
180
+ template <unsigned int count>
181
+ struct recursive_sliced_shuffle_helper<count, true> {
182
+ int val;
183
+
184
+ template <typename TyFn>
185
+ _CG_QUALIFIER void invoke_shuffle(const TyFn &shfl) {
186
+ val = shfl(val);
187
+ }
188
+ };
189
+
190
+ template <unsigned int count>
191
+ struct recursive_sliced_shuffle_helper<count, false> {
192
+ int val;
193
+ recursive_sliced_shuffle_helper<count - sizeof(int)> next;
194
+
195
+ template <typename TyFn>
196
+ _CG_QUALIFIER void invoke_shuffle(const TyFn &shfl) {
197
+ val = shfl(val);
198
+ next.invoke_shuffle(shfl);
199
+ }
200
+ };
201
+ }
202
+
203
+ struct _memory_shuffle {
204
+ template <typename TyElem, typename TyShflFn>
205
+ _CG_STATIC_QUALIFIER TyElem _shfl_internal(TyElem elem, const TyShflFn& fn) {
206
+ static_assert(sizeof(TyElem) <= 32, "Cooperative groups collectives are limited to types smaller than 32B");
207
+ return TyElem{};
208
+ }
209
+
210
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
211
+ _CG_STATIC_QUALIFIER TyRet shfl(TyElem&& elem, unsigned int gMask, unsigned int srcRank, unsigned int threads) {
212
+ auto shfl = [=](int val) -> int {
213
+ return 0;
214
+ };
215
+
216
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
217
+ }
218
+
219
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
220
+ _CG_STATIC_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int gMask, unsigned int delta, unsigned int threads) {
221
+ auto shfl = [=](int val) -> int {
222
+ return 0;
223
+ };
224
+
225
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
226
+ }
227
+
228
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
229
+ _CG_STATIC_QUALIFIER TyRet shfl_up(TyElem&& elem, unsigned int gMask, unsigned int delta, unsigned int threads) {
230
+ auto shfl = [=](int val) -> int {
231
+ return 0;
232
+ };
233
+
234
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
235
+ }
236
+
237
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
238
+ _CG_STATIC_QUALIFIER TyRet shfl_xor(TyElem&& elem, unsigned int gMask, unsigned int lMask, unsigned int threads) {
239
+ auto shfl = [=](int val) -> int {
240
+ return 0;
241
+ };
242
+
243
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
244
+ }
245
+ };
246
+
247
+ /***********************************************************************************
248
+ * Intrinsic Device Function Shuffle
249
+ * Purpose:
250
+ * Uses a shuffle helper that has characteristics best suited for moving
251
+ * elements between threads
252
+ * Expectations:
253
+ * Object given will be forced into an l-value type so that it can be used
254
+ * with a helper structure that reinterprets the data into intrinsic compatible
255
+ * types
256
+ * Notes:
257
+ * !! TyRet is required so that objects are returned by value and not as
258
+ * dangling references depending on the value category of the passed object
259
+ **********************************************************************************/
260
+ struct _intrinsic_compat_shuffle {
261
+ template <unsigned int count>
262
+ using shfl_helper = shfl::recursive_sliced_shuffle_helper<count>;
263
+
264
+ template <typename TyElem, typename TyShflFn>
265
+ _CG_STATIC_QUALIFIER TyElem _shfl_internal(TyElem elem, const TyShflFn& fn) {
266
+ static_assert(__is_trivially_copyable(TyElem), "Type is not compatible with device shuffle");
267
+ shfl_helper<sizeof(TyElem)> helper;
268
+ memcpy(&helper, &elem, sizeof(TyElem));
269
+ helper.invoke_shuffle(fn);
270
+ memcpy(&elem, &helper, sizeof(TyElem));
271
+ return elem;
272
+ }
273
+
274
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
275
+ _CG_STATIC_QUALIFIER TyRet shfl(TyElem&& elem, unsigned int gMask, unsigned int srcRank, unsigned int threads) {
276
+ auto shfl = [=](int val) -> int {
277
+ return __shfl_sync(gMask, val, srcRank, threads);
278
+ };
279
+
280
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
281
+ }
282
+
283
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
284
+ _CG_STATIC_QUALIFIER TyRet shfl_down(TyElem&& elem, unsigned int gMask, unsigned int delta, unsigned int threads) {
285
+ auto shfl = [=](int val) -> int {
286
+ return __shfl_down_sync(gMask, val, delta, threads);
287
+ };
288
+
289
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
290
+ }
291
+
292
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
293
+ _CG_STATIC_QUALIFIER TyRet shfl_up(TyElem&& elem, unsigned int gMask, unsigned int delta, unsigned int threads) {
294
+ auto shfl = [=](int val) -> int {
295
+ return __shfl_up_sync(gMask, val, delta, threads);
296
+ };
297
+
298
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
299
+ }
300
+
301
+ template <typename TyElem, typename TyRet = remove_qual<TyElem>>
302
+ _CG_STATIC_QUALIFIER TyRet shfl_xor(TyElem&& elem, unsigned int gMask, unsigned int lMask, unsigned int threads) {
303
+ auto shfl = [=](int val) -> int {
304
+ return __shfl_xor_sync(gMask, val, lMask, threads);
305
+ };
306
+
307
+ return _shfl_internal<TyRet>(_CG_STL_NAMESPACE::forward<TyElem>(elem), shfl);
308
+ }
309
+ };
310
+
311
+ struct _native_shuffle {
312
+ template <typename TyElem>
313
+ _CG_STATIC_QUALIFIER TyElem shfl(
314
+ TyElem elem, unsigned int gMask, unsigned int srcRank, unsigned int threads) {
315
+ return static_cast<TyElem>(__shfl_sync(gMask, elem, srcRank, threads));
316
+ }
317
+
318
+ template <typename TyElem>
319
+ _CG_STATIC_QUALIFIER TyElem shfl_down(
320
+ TyElem elem, unsigned int gMask, unsigned int delta, unsigned int threads) {
321
+ return static_cast<TyElem>(__shfl_down_sync(gMask, elem, delta, threads));
322
+ }
323
+
324
+ template <typename TyElem>
325
+ _CG_STATIC_QUALIFIER TyElem shfl_up(
326
+ TyElem elem, unsigned int gMask, unsigned int delta, unsigned int threads) {
327
+ return static_cast<TyElem>(__shfl_up_sync(gMask, elem, delta, threads));
328
+ }
329
+
330
+ template <typename TyElem>
331
+ _CG_STATIC_QUALIFIER TyElem shfl_xor(
332
+ TyElem elem, unsigned int gMask, unsigned int lMask, unsigned int threads) {
333
+ return static_cast<TyElem>(__shfl_xor_sync(gMask, elem, lMask, threads));
334
+ }
335
+ };
336
+
337
+ // Almost all arithmetic types are supported by native shuffle
338
+ // Vector types are the exception
339
+ template <typename TyElem>
340
+ using use_native_shuffle = _CG_STL_NAMESPACE::integral_constant<
341
+ bool,
342
+ _CG_STL_NAMESPACE::is_integral<
343
+ remove_qual<TyElem>>::value ||
344
+ details::is_float_or_half<
345
+ remove_qual<TyElem>>::value
346
+ >;
347
+
348
+ constexpr unsigned long long _MemoryShuffleCutoff = 32;
349
+
350
+ template <typename TyElem,
351
+ bool IsNative = use_native_shuffle<TyElem>::value,
352
+ bool InMem = (sizeof(TyElem) > _MemoryShuffleCutoff)>
353
+ struct shuffle_dispatch;
354
+
355
+ template <typename TyElem>
356
+ struct shuffle_dispatch<TyElem, true, false> : public _native_shuffle {};
357
+
358
+ template <typename TyElem>
359
+ struct shuffle_dispatch<TyElem, false, false> : public _intrinsic_compat_shuffle {};
360
+
361
+ template <typename TyElem>
362
+ struct shuffle_dispatch<TyElem, false, true> : public _memory_shuffle {};
363
+
364
+ #endif //_CG_CPP11_FEATURES
365
+ };
366
+
367
+ namespace multi_grid {
368
+ struct multi_grid_functions;
369
+ };
370
+
371
+ namespace grid {
372
+ _CG_STATIC_QUALIFIER void sync(unsigned int *bar) {
373
+ unsigned int expected = gridDim.x * gridDim.y * gridDim.z;
374
+
375
+ details::sync_grids(expected, bar);
376
+ }
377
+
378
+ _CG_STATIC_QUALIFIER unsigned long long num_blocks()
379
+ {
380
+ // grid.y * grid.z -> [max(65535) * max(65535)] fits within 4b, promote after multiplication
381
+ // grid.x * (grid.y * grid.z) -> [max(2^31-1) * max(65535 * 65535)] exceeds 4b, promote before multiplication
382
+ return (unsigned long long)gridDim.x * (gridDim.y * gridDim.z);
383
+ }
384
+
385
+ _CG_STATIC_QUALIFIER unsigned long long num_threads()
386
+ {
387
+ return num_blocks() * cta::num_threads();
388
+ }
389
+
390
+ _CG_STATIC_QUALIFIER unsigned long long block_rank()
391
+ {
392
+ return vec3_to_linear<unsigned long long>(blockIdx, gridDim);
393
+ }
394
+
395
+ _CG_STATIC_QUALIFIER unsigned long long thread_rank()
396
+ {
397
+ return block_rank() * cta::num_threads() + cta::thread_rank();
398
+ }
399
+
400
+ _CG_STATIC_QUALIFIER dim3 dim_blocks()
401
+ {
402
+ return dim3(gridDim.x, gridDim.y, gridDim.z);
403
+ }
404
+
405
+ _CG_STATIC_QUALIFIER dim3 block_index()
406
+ {
407
+ return dim3(blockIdx.x, blockIdx.y, blockIdx.z);
408
+ }
409
+
410
+ #if defined(_CG_HAS_CLUSTER_GROUP)
411
+ _CG_STATIC_QUALIFIER dim3 dim_clusters() {
412
+ return __clusterGridDimInClusters();
413
+ }
414
+
415
+ _CG_STATIC_QUALIFIER unsigned long long num_clusters() {
416
+ const dim3 dimClusters = dim_clusters();
417
+ return dimClusters.x * dimClusters.y * dimClusters.z;
418
+ }
419
+
420
+ _CG_STATIC_QUALIFIER dim3 cluster_index() {
421
+ return __clusterIdx();
422
+ }
423
+
424
+ _CG_STATIC_QUALIFIER unsigned long long cluster_rank() {
425
+ return vec3_to_linear<unsigned long long>(cluster_index(), dim_clusters());
426
+ }
427
+ #endif
428
+
429
+ // Legacy aliases
430
+ _CG_STATIC_QUALIFIER unsigned long long size()
431
+ {
432
+ return num_threads();
433
+ }
434
+
435
+ _CG_STATIC_QUALIFIER dim3 grid_dim()
436
+ {
437
+ return dim_blocks();
438
+ }
439
+ };
440
+
441
+
442
+ #if defined(_CG_HAS_MULTI_GRID_GROUP)
443
+
444
+ namespace multi_grid {
445
+ _CG_STATIC_QUALIFIER unsigned long long get_intrinsic_handle()
446
+ {
447
+ return (cudaCGGetIntrinsicHandle(cudaCGScopeMultiGrid));
448
+ }
449
+
450
+ _CG_STATIC_QUALIFIER void sync(const unsigned long long handle)
451
+ {
452
+ cudaError_t err = cudaCGSynchronize(handle, 0);
453
+ }
454
+
455
+ _CG_STATIC_QUALIFIER unsigned int size(const unsigned long long handle)
456
+ {
457
+ unsigned int numThreads = 0;
458
+ cudaCGGetSize(&numThreads, NULL, handle);
459
+ return numThreads;
460
+ }
461
+
462
+ _CG_STATIC_QUALIFIER unsigned int thread_rank(const unsigned long long handle)
463
+ {
464
+ unsigned int threadRank = 0;
465
+ cudaCGGetRank(&threadRank, NULL, handle);
466
+ return threadRank;
467
+ }
468
+
469
+ _CG_STATIC_QUALIFIER unsigned int grid_rank(const unsigned long long handle)
470
+ {
471
+ unsigned int gridRank = 0;
472
+ cudaCGGetRank(NULL, &gridRank, handle);
473
+ return gridRank;
474
+ }
475
+
476
+ _CG_STATIC_QUALIFIER unsigned int num_grids(const unsigned long long handle)
477
+ {
478
+ unsigned int numGrids = 0;
479
+ cudaCGGetSize(NULL, &numGrids, handle);
480
+ return numGrids;
481
+ }
482
+
483
+ # ifdef _CG_CPP11_FEATURES
484
+ struct multi_grid_functions {
485
+ decltype(multi_grid::get_intrinsic_handle) *get_intrinsic_handle;
486
+ decltype(multi_grid::sync) *sync;
487
+ decltype(multi_grid::size) *size;
488
+ decltype(multi_grid::thread_rank) *thread_rank;
489
+ decltype(multi_grid::grid_rank) *grid_rank;
490
+ decltype(multi_grid::num_grids) *num_grids;
491
+ };
492
+
493
+ template <typename = void>
494
+ _CG_STATIC_QUALIFIER const multi_grid_functions* load_grid_intrinsics() {
495
+ __constant__ static const multi_grid_functions mgf {
496
+ &multi_grid::get_intrinsic_handle,
497
+ &multi_grid::sync,
498
+ &multi_grid::size,
499
+ &multi_grid::thread_rank,
500
+ &multi_grid::grid_rank,
501
+ &multi_grid::num_grids
502
+ };
503
+
504
+ return &mgf;
505
+ }
506
+ # endif
507
+ };
508
+ #endif
509
+
510
+ #if defined(_CG_HAS_CLUSTER_GROUP)
511
+ namespace cluster {
512
+
513
+ _CG_STATIC_QUALIFIER bool isReal()
514
+ {
515
+ return __clusterDimIsSpecified();
516
+ }
517
+
518
+ _CG_STATIC_QUALIFIER void barrier_arrive()
519
+ {
520
+ __cluster_barrier_arrive();
521
+ }
522
+
523
+ _CG_STATIC_QUALIFIER void barrier_wait()
524
+ {
525
+ __cluster_barrier_wait();
526
+ }
527
+
528
+ _CG_STATIC_QUALIFIER void sync()
529
+ {
530
+ barrier_arrive();
531
+ barrier_wait();
532
+ }
533
+
534
+ _CG_STATIC_QUALIFIER unsigned int query_shared_rank(const void *addr)
535
+ {
536
+ return __cluster_query_shared_rank(addr);
537
+ }
538
+
539
+ template <typename T>
540
+ _CG_STATIC_QUALIFIER T* map_shared_rank(T *addr, int rank)
541
+ {
542
+ return static_cast<T*>(__cluster_map_shared_rank(addr, rank));
543
+ }
544
+
545
+ _CG_STATIC_QUALIFIER dim3 block_index()
546
+ {
547
+ return __clusterRelativeBlockIdx();
548
+ }
549
+
550
+ _CG_STATIC_QUALIFIER unsigned int block_rank()
551
+ {
552
+ return __clusterRelativeBlockRank();
553
+ }
554
+
555
+ _CG_STATIC_QUALIFIER unsigned int thread_rank()
556
+ {
557
+ return block_rank() * cta::num_threads() + cta::thread_rank();
558
+ }
559
+
560
+ _CG_STATIC_QUALIFIER dim3 dim_blocks()
561
+ {
562
+ return __clusterDim();
563
+ }
564
+
565
+ _CG_STATIC_QUALIFIER unsigned int num_blocks()
566
+ {
567
+ return __clusterSizeInBlocks();
568
+ }
569
+
570
+ _CG_STATIC_QUALIFIER dim3 dim_threads()
571
+ {
572
+ const dim3 dimBlocks = dim_blocks();
573
+ const unsigned int x = dimBlocks.x * blockDim.x;
574
+ const unsigned int y = dimBlocks.y * blockDim.y;
575
+ const unsigned int z = dimBlocks.z * blockDim.z;
576
+ return dim3(x, y, z);
577
+ }
578
+
579
+ _CG_STATIC_QUALIFIER unsigned int num_threads()
580
+ {
581
+ return num_blocks() * cta::num_threads();
582
+ }
583
+
584
+ };
585
+ #endif
586
+
587
+ _CG_STATIC_QUALIFIER unsigned int laneid()
588
+ {
589
+ unsigned int laneid;
590
+ asm ("mov.u32 %0, %%laneid;" : "=r"(laneid));
591
+ return laneid;
592
+ }
593
+
594
+ _CG_STATIC_QUALIFIER unsigned int lanemask32_eq()
595
+ {
596
+ unsigned int lanemask32_eq;
597
+ asm ("mov.u32 %0, %%lanemask_eq;" : "=r"(lanemask32_eq));
598
+ return (lanemask32_eq);
599
+ }
600
+
601
+ _CG_STATIC_QUALIFIER unsigned int lanemask32_lt()
602
+ {
603
+ unsigned int lanemask32_lt;
604
+ asm ("mov.u32 %0, %%lanemask_lt;" : "=r"(lanemask32_lt));
605
+ return (lanemask32_lt);
606
+ }
607
+
608
+ _CG_STATIC_QUALIFIER void abort()
609
+ {
610
+ _CG_ABORT();
611
+ }
612
+
613
+ template <typename Ty>
614
+ _CG_QUALIFIER void assert_if_not_arithmetic() {
615
+ #ifdef _CG_CPP11_FEATURES
616
+ static_assert(
617
+ _CG_STL_NAMESPACE::is_integral<Ty>::value ||
618
+ details::is_float_or_half<Ty>::value,
619
+ "Error: Ty is neither integer or float"
620
+ );
621
+ #endif //_CG_CPP11_FEATURES
622
+ }
623
+
624
+ #ifdef _CG_CPP11_FEATURES
625
+ _CG_STATIC_QUALIFIER constexpr unsigned int log2(unsigned int x) {
626
+ return x == 1 ? 0 : 1 + log2(x / 2);
627
+ }
628
+ #endif //_CG_CPP11_FEATURES
629
+
630
+ }; // !Namespace internal
631
+
632
+ _CG_END_NAMESPACE
633
+
634
+ #endif /* !_COOPERATIVE_GROUPS_HELPERS_H_ */
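The helpers above linearize a dim3 index in row-major order (z, then y, then x), and grid::thread_rank composes the block rank with the intra-block rank. A small host-side sketch of the same arithmetic, compiled with nvcc and using hypothetical names, is the following; it is not part of the header.

// Hedged sketch: the row-major linearization performed by vec3_to_linear,
// written as a standalone helper; linear_index is a hypothetical name.
#include <cstdio>
#include <vector_types.h>   // dim3

__host__ __device__ unsigned long long linear_index(dim3 idx, dim3 dim)
{
    return (unsigned long long)idx.z * dim.y * dim.x
         + (unsigned long long)idx.y * dim.x
         + (unsigned long long)idx.x;
}

int main()
{
    dim3 dim(4, 2, 3);   // 4 x 2 x 3 blocks
    dim3 idx(1, 1, 2);   // expected rank: 2*(2*4) + 1*4 + 1 = 21
    std::printf("%llu\n", linear_index(idx, dim));
    return 0;
}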