Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- ckpts/universal/global_step120/zero/10.attention.query_key_value.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step120/zero/23.post_attention_layernorm.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step120/zero/23.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step120/zero/23.post_attention_layernorm.weight/fp32.pt +3 -0
- venv/lib/python3.10/site-packages/torch/include/THC/THCAtomics.cuh +3 -0
- venv/lib/python3.10/site-packages/torch/include/THC/THCDeviceUtils.cuh +3 -0
- venv/lib/python3.10/site-packages/torch/include/c10/core/Backend.h +388 -0
- venv/lib/python3.10/site-packages/torch/include/c10/core/CPUAllocator.h +59 -0
- venv/lib/python3.10/site-packages/torch/include/c10/core/CompileTimeFunctionPointer.h +57 -0
- venv/lib/python3.10/site-packages/torch/include/c10/core/Contiguity.h +129 -0
- venv/lib/python3.10/site-packages/torch/include/c10/core/DefaultTensorOptions.h +45 -0
- venv/lib/python3.10/site-packages/torch/include/c10/core/DeviceGuard.h +199 -0
- venv/lib/python3.10/site-packages/torch/include/c10/core/DeviceType.h +123 -0
- venv/lib/python3.10/site-packages/torch/include/c10/core/GeneratorImpl.h +107 -0
- venv/lib/python3.10/site-packages/torch/include/c10/core/SymNodeImpl.h +218 -0
- venv/lib/python3.10/site-packages/torch/include/c10/core/UndefinedTensorImpl.h +42 -0
- venv/lib/python3.10/site-packages/torch/include/c10/core/impl/COW.h +32 -0
- venv/lib/python3.10/site-packages/torch/include/c10/core/impl/COWDeleter.h +66 -0
- venv/lib/python3.10/site-packages/torch/include/c10/core/impl/DeviceGuardImplInterface.h +337 -0
- venv/lib/python3.10/site-packages/torch/include/c10/core/impl/FakeGuardImpl.h +102 -0
- venv/lib/python3.10/site-packages/torch/include/c10/core/impl/GPUTrace.h +28 -0
- venv/lib/python3.10/site-packages/torch/include/c10/core/impl/HermeticPyObjectTLS.h +59 -0
- venv/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineDeviceGuard.h +428 -0
- venv/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineEvent.h +113 -0
- venv/lib/python3.10/site-packages/torch/include/c10/core/impl/LocalDispatchKeySet.h +162 -0
- venv/lib/python3.10/site-packages/torch/include/c10/core/impl/PyInterpreter.h +239 -0
- venv/lib/python3.10/site-packages/torch/include/c10/core/impl/PythonDispatcherTLS.h +24 -0
- venv/lib/python3.10/site-packages/torch/include/c10/core/impl/SizesAndStrides.h +315 -0
- venv/lib/python3.10/site-packages/torch/include/c10/core/impl/TorchDispatchModeTLS.h +63 -0
- venv/lib/python3.10/site-packages/torch/include/c10/core/impl/VirtualGuardImpl.h +91 -0
- venv/lib/python3.10/site-packages/torch/include/c10/core/impl/alloc_cpu.h +12 -0
- venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUCachingAllocator.h +20 -0
- venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUDeviceProp.h +155 -0
- venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUException.h +22 -0
- venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUFunctions.h +35 -0
- venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUMacros.h +19 -0
- venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUStream.h +162 -0
- venv/lib/python3.10/site-packages/torch/include/c10/xpu/impl/XPUGuardImpl.h +125 -0
- venv/lib/python3.10/site-packages/torch/include/pybind11/attr.h +690 -0
- venv/lib/python3.10/site-packages/torch/include/pybind11/buffer_info.h +208 -0
- venv/lib/python3.10/site-packages/torch/include/pybind11/cast.h +1837 -0
- venv/lib/python3.10/site-packages/torch/include/pybind11/chrono.h +225 -0
- venv/lib/python3.10/site-packages/torch/include/pybind11/common.h +2 -0
- venv/lib/python3.10/site-packages/torch/include/pybind11/complex.h +74 -0
- venv/lib/python3.10/site-packages/torch/include/pybind11/detail/class.h +748 -0
- venv/lib/python3.10/site-packages/torch/include/pybind11/detail/common.h +1267 -0
- venv/lib/python3.10/site-packages/torch/include/pybind11/detail/descr.h +171 -0
- venv/lib/python3.10/site-packages/torch/include/pybind11/detail/init.h +434 -0
- venv/lib/python3.10/site-packages/torch/include/pybind11/detail/internals.h +667 -0
- venv/lib/python3.10/site-packages/torch/include/pybind11/detail/type_caster_base.h +1218 -0
ckpts/universal/global_step120/zero/10.attention.query_key_value.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5e305378077691c1e1707ac61e6e781ba77912aebb0b739739f4a27787a6730
+size 50332828
ckpts/universal/global_step120/zero/23.post_attention_layernorm.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60f882c18d2a4a922d2308c0a7d8da218205c905605fe03eaa8fc9b5c9d9c833
+size 9372
ckpts/universal/global_step120/zero/23.post_attention_layernorm.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:552f7b41d62cf60240a4300545947e080c170e5eb6293ccc3292eba26ac46f77
+size 9387
ckpts/universal/global_step120/zero/23.post_attention_layernorm.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51af4b0c5807cff211952cd04d0003ed554f95d3e2ee55dd51846d5bc5d699c9
+size 9293
venv/lib/python3.10/site-packages/torch/include/THC/THCAtomics.cuh
ADDED
@@ -0,0 +1,3 @@
+#pragma once
+// TODO: Remove once torchvision has been updated to use the ATen header
+#include <ATen/cuda/Atomic.cuh>
venv/lib/python3.10/site-packages/torch/include/THC/THCDeviceUtils.cuh
ADDED
@@ -0,0 +1,3 @@
+#pragma once
+// TODO: Remove this header
+#include <ATen/cuda/DeviceUtils.cuh>
venv/lib/python3.10/site-packages/torch/include/c10/core/Backend.h
ADDED
@@ -0,0 +1,388 @@
+#pragma once
+
+#include <c10/core/DeviceType.h>
+#include <c10/core/DispatchKey.h>
+#include <c10/core/DispatchKeySet.h>
+#include <c10/util/Exception.h>
+
+#include <stdexcept>
+
+namespace c10 {
+
+/**
+ * This legacy enum class defines the set of backends supported by old school,
+ * code generated Type-based ATen. A "backend" in this sense roughly
+ * corresponds to the cartesian product of (device type, layout), but restricted
+ * only to combinations which we actually have kernels for. Backend does NOT
+ * include dtype.
+ *
+ * The reason we are sunsetting this enum class is because it doesn't allow for
+ * open registration; e.g., if you want to add SparseXLA, you'd have to
+ * edit this enum; you wouldn't be able to do it out of tree. DispatchKey is
+ * the replacement for Backend which supports open registration.
+ *
+ * NB: The concept of 'Backend' here disagrees with the notion of backend
+ * exposed to users in torch.backends. Backend here is something like "CPU"
+ * or "SparseCUDA"; backend in torch.backends is something like "MKL" or
+ * "CUDNN".
+ */
+enum class Backend {
+  CPU,
+  CUDA,
+  HIP,
+  VE,
+  FPGA,
+  IPU,
+  XPU,
+  SparseCPU,
+  SparseCUDA,
+  SparseCsrCPU,
+  SparseCsrCUDA,
+  SparseHIP,
+  SparseVE,
+  SparseXPU,
+  SparsePrivateUse1,
+  SparseCsrHIP,
+  SparseCsrVE,
+  SparseCsrXPU,
+  SparseCsrPrivateUse1,
+  ORT,
+  XLA,
+  Vulkan,
+  Metal,
+  Meta,
+  QuantizedCPU,
+  QuantizedCUDA,
+  QuantizedXPU,
+  QuantizedPrivateUse1,
+  Undefined,
+  MkldnnCPU,
+  MPS,
+  HPU,
+  Lazy,
+  MTIA,
+  PrivateUse1,
+  NumOptions
+};
+
+static inline Backend dispatchKeyToBackend(DispatchKey t) {
+  if (t == DispatchKey::CPU || t == DispatchKey::AutogradCPU) {
+    return Backend::CPU;
+  } else if (t == DispatchKey::CUDA || t == DispatchKey::AutogradCUDA) {
+    return Backend::CUDA;
+  } else if (t == DispatchKey::HIP) {
+    return Backend::HIP;
+  } else if (t == DispatchKey::VE) {
+    return Backend::VE;
+  } else if (t == DispatchKey::FPGA) {
+    return Backend::FPGA;
+  } else if (t == DispatchKey::ORT) {
+    return Backend::ORT;
+  } else if (t == DispatchKey::XLA || t == DispatchKey::AutogradXLA) {
+    return Backend::XLA;
+  } else if (t == DispatchKey::Lazy || t == DispatchKey::AutogradLazy) {
+    return Backend::Lazy;
+  } else if (t == DispatchKey::MPS || t == DispatchKey::AutogradMPS) {
+    return Backend::MPS;
+  } else if (t == DispatchKey::Vulkan) {
+    return Backend::Vulkan;
+  } else if (t == DispatchKey::Metal) {
+    return Backend::Metal;
+  } else if (t == DispatchKey::Meta) {
+    return Backend::Meta;
+  } else if (t == DispatchKey::SparseCPU) {
+    return Backend::SparseCPU;
+  } else if (t == DispatchKey::SparseCUDA) {
+    return Backend::SparseCUDA;
+  } else if (t == DispatchKey::SparseHIP) {
+    return Backend::SparseHIP;
+  } else if (t == DispatchKey::SparseVE) {
+    return Backend::SparseVE;
+  } else if (t == DispatchKey::SparsePrivateUse1) {
+    return Backend::SparsePrivateUse1;
+  } else if (t == DispatchKey::SparseCsrCPU) {
+    return Backend::SparseCsrCPU;
+  } else if (t == DispatchKey::SparseCsrCUDA) {
+    return Backend::SparseCsrCUDA;
+  } else if (t == DispatchKey::SparseCsrHIP) {
+    return Backend::SparseCsrHIP;
+  } else if (t == DispatchKey::SparseCsrVE) {
+    return Backend::SparseCsrVE;
+  } else if (t == DispatchKey::SparseCsrPrivateUse1) {
+    return Backend::SparseCsrPrivateUse1;
+  } else if (t == DispatchKey::MkldnnCPU) {
+    return Backend::MkldnnCPU;
+  } else if (t == DispatchKey::QuantizedCPU) {
+    return Backend::QuantizedCPU;
+  } else if (t == DispatchKey::QuantizedCUDA) {
+    return Backend::QuantizedCUDA;
+  } else if (t == DispatchKey::IPU || t == DispatchKey::AutogradIPU) {
+    return Backend::IPU;
+  } else if (t == DispatchKey::XPU || t == DispatchKey::AutogradXPU) {
+    return Backend::XPU;
+  } else if (t == DispatchKey::SparseXPU) {
+    return Backend::SparseXPU;
+  } else if (t == DispatchKey::SparseCsrXPU) {
+    return Backend::SparseCsrXPU;
+  } else if (t == DispatchKey::QuantizedXPU) {
+    return Backend::QuantizedXPU;
+  } else if (t == DispatchKey::QuantizedPrivateUse1) {
+    return Backend::QuantizedPrivateUse1;
+  } else if (t == DispatchKey::HPU || t == DispatchKey::AutogradHPU) {
+    return Backend::HPU;
+  } else if (t == DispatchKey::MTIA || t == DispatchKey::AutogradMTIA) {
+    return Backend::MTIA;
+  } else if (
+      t == DispatchKey::PrivateUse1 || t == DispatchKey::AutogradPrivateUse1) {
+    return Backend::PrivateUse1;
+  } else if (t == DispatchKey::Undefined) {
+    return Backend::Undefined;
+  } else {
+    TORCH_CHECK(false, "Unrecognized tensor type ID: ", t);
+  }
+}
+
+static inline DispatchKey backendToDispatchKey(Backend b) {
+  switch (b) {
+    case Backend::CPU:
+      return DispatchKey::CPU;
+    case Backend::CUDA:
+      return DispatchKey::CUDA;
+    case Backend::HIP:
+      return DispatchKey::HIP;
+    case Backend::VE:
+      return DispatchKey::VE;
+    case Backend::FPGA:
+      return DispatchKey::FPGA;
+    case Backend::ORT:
+      return DispatchKey::ORT;
+    case Backend::XLA:
+      return DispatchKey::XLA;
+    case Backend::Lazy:
+      return DispatchKey::Lazy;
+    case Backend::IPU:
+      return DispatchKey::IPU;
+    case Backend::XPU:
+      return DispatchKey::XPU;
+    case Backend::SparseXPU:
+      return DispatchKey::SparseXPU;
+    case Backend::SparseCsrXPU:
+      return DispatchKey::SparseCsrXPU;
+    case Backend::SparseCPU:
+      return DispatchKey::SparseCPU;
+    case Backend::SparseCUDA:
+      return DispatchKey::SparseCUDA;
+    case Backend::SparseHIP:
+      return DispatchKey::SparseHIP;
+    case Backend::SparseVE:
+      return DispatchKey::SparseVE;
+    case Backend::SparsePrivateUse1:
+      return DispatchKey::SparsePrivateUse1;
+    case Backend::SparseCsrCPU:
+      return DispatchKey::SparseCsrCPU;
+    case Backend::SparseCsrCUDA:
+      return DispatchKey::SparseCsrCUDA;
+    case Backend::SparseCsrHIP:
+      return DispatchKey::SparseCsrHIP;
+    case Backend::SparseCsrVE:
+      return DispatchKey::SparseCsrVE;
+    case Backend::SparseCsrPrivateUse1:
+      return DispatchKey::SparseCsrPrivateUse1;
+    case Backend::MkldnnCPU:
+      return DispatchKey::MkldnnCPU;
+    case Backend::Vulkan:
+      return DispatchKey::Vulkan;
+    case Backend::Metal:
+      return DispatchKey::Metal;
+    case Backend::Meta:
+      return DispatchKey::Meta;
+    case Backend::QuantizedCPU:
+      return DispatchKey::QuantizedCPU;
+    case Backend::QuantizedCUDA:
+      return DispatchKey::QuantizedCUDA;
+    case Backend::QuantizedPrivateUse1:
+      return DispatchKey::QuantizedPrivateUse1;
+    case Backend::Undefined:
+      return DispatchKey::Undefined;
+    case Backend::MPS:
+      return DispatchKey::MPS;
+    case Backend::HPU:
+      return DispatchKey::HPU;
+    case Backend::MTIA:
+      return DispatchKey::MTIA;
+    case Backend::PrivateUse1:
+      return DispatchKey::PrivateUse1;
+    default:
+      throw std::runtime_error("Unknown backend");
+  }
+}
+
+static inline DeviceType backendToDeviceType(Backend b) {
+  switch (b) {
+    case Backend::CPU:
+    case Backend::MkldnnCPU:
+    case Backend::SparseCPU:
+    case Backend::SparseCsrCPU:
+    case Backend::QuantizedCPU:
+      return DeviceType::CPU;
+    case Backend::CUDA:
+    case Backend::SparseCUDA:
+    case Backend::QuantizedCUDA:
+    case Backend::SparseCsrCUDA:
+      return DeviceType::CUDA;
+    case Backend::HIP:
+      return DeviceType::HIP;
+    case Backend::VE:
+      return DeviceType::VE;
+    case Backend::FPGA:
+      return DeviceType::FPGA;
+    case Backend::ORT:
+      return DeviceType::ORT;
+    case Backend::XLA:
+      return DeviceType::XLA;
+    case Backend::Lazy:
+      return DeviceType::Lazy;
+    case Backend::SparseHIP:
+      return DeviceType::HIP;
+    case Backend::SparseVE:
+      return DeviceType::VE;
+    case Backend::SparseCsrHIP:
+      return DeviceType::HIP;
+    case Backend::SparseCsrVE:
+      return DeviceType::VE;
+    case Backend::IPU:
+      return DeviceType::IPU;
+    case Backend::XPU:
+    case Backend::SparseXPU:
+    case Backend::SparseCsrXPU:
+    case Backend::QuantizedXPU:
+      return DeviceType::XPU;
+    case Backend::Vulkan:
+      return DeviceType::Vulkan;
+    case Backend::Metal:
+      return DeviceType::Metal;
+    case Backend::Meta:
+      return DeviceType::Meta;
+    case Backend::MPS:
+      return DeviceType::MPS;
+    case Backend::HPU:
+      return DeviceType::HPU;
+    case Backend::MTIA:
+      return DeviceType::MTIA;
+    case Backend::PrivateUse1:
+    case Backend::SparsePrivateUse1:
+    case Backend::SparseCsrPrivateUse1:
+    case Backend::QuantizedPrivateUse1:
+      return DeviceType::PrivateUse1;
+    case Backend::Undefined:
+      TORCH_CHECK(false, "Undefined backend is not a valid device type");
+    default:
+      TORCH_CHECK(false, "Unknown backend");
+  }
+}
+
+// TODO: This probably shouldn't actually be static inline
+static inline const char* toString(Backend b) {
+  switch (b) {
+    case Backend::CPU:
+      return "CPU";
+    case Backend::CUDA:
+      return "CUDA";
+    case Backend::HIP:
+      return "HIP";
+    case Backend::VE:
+      return "VE";
+    case Backend::FPGA:
+      return "FPGA";
+    case Backend::XPU:
+      return "XPU";
+    case Backend::IPU:
+      return "IPU";
+    case Backend::ORT:
+      return "ORT";
+    case Backend::XLA:
+      return "XLA";
+    case Backend::Lazy:
+      return "Lazy";
+    case Backend::MPS:
+      return "MPS";
+    case Backend::SparseCPU:
+      return "SparseCPU";
+    case Backend::SparseCUDA:
+      return "SparseCUDA";
+    case Backend::SparseHIP:
+      return "SparseHIP";
+    case Backend::SparseVE:
+      return "SparseVE";
+    case Backend::SparseXPU:
+      return "SparseXPU";
+    case Backend::SparsePrivateUse1:
+      return "SparsePrivateUse1";
+    case Backend::SparseCsrCPU:
+      return "SparseCsrCPU";
+    case Backend::SparseCsrCUDA:
+      return "SparseCsrCUDA";
+    case Backend::SparseCsrHIP:
+      return "SparseCsrHIP";
+    case Backend::SparseCsrVE:
+      return "SparseCsrVE";
+    case Backend::SparseCsrXPU:
+      return "SparseCsrXPU";
+    case Backend::SparseCsrPrivateUse1:
+      return "SparseCsrPrivateUse1";
+    case Backend::MkldnnCPU:
+      return "MkldnnCPU";
+    case Backend::Vulkan:
+      return "Vulkan";
+    case Backend::Metal:
+      return "Metal";
+    case Backend::Meta:
+      return "Meta";
+    case Backend::QuantizedCPU:
+      return "QuantizedCPU";
+    case Backend::QuantizedCUDA:
+      return "QuantizedCUDA";
+    case Backend::QuantizedXPU:
+      return "QuantizedXPU";
+    case Backend::QuantizedPrivateUse1:
+      return "QuantizedPrivateUse1";
+    case Backend::HPU:
+      return "HPU";
+    case Backend::MTIA:
+      return "MTIA";
+    case Backend::PrivateUse1:
+      return "PrivateUseOne";
+    default:
+      return "UNKNOWN_BACKEND";
+  }
+}
+
+static inline bool isSparse(Backend b) {
+  switch (b) {
+    case Backend::SparseXPU:
+    case Backend::SparseCPU:
+    case Backend::SparseCUDA:
+    case Backend::SparseHIP:
+    case Backend::SparseVE:
+    case Backend::SparsePrivateUse1:
+      return true;
+    default:
+      return false;
+  }
+}
+
+static inline bool isSparseCsr(Backend b) {
+  switch (b) {
+    case Backend::SparseCsrXPU:
+    case Backend::SparseCsrCPU:
+    case Backend::SparseCsrCUDA:
+    case Backend::SparseCsrHIP:
+    case Backend::SparseCsrVE:
+    case Backend::SparseCsrPrivateUse1:
+      return true;
+    default:
+      return false;
+  }
+}
+
+} // namespace c10
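The three helpers above are total maps in both directions, with Autograd keys collapsing onto their backend. A minimal round-trip sketch, assuming a PyTorch C++ build with the c10 headers and library available:

#include <c10/core/Backend.h>
#include <iostream>

int main() {
  // Autograd keys collapse onto their backend: AutogradCUDA -> Backend::CUDA.
  c10::Backend b = c10::dispatchKeyToBackend(c10::DispatchKey::AutogradCUDA);
  std::cout << c10::toString(b) << "\n";            // "CUDA"
  std::cout << c10::backendToDeviceType(b) << "\n"; // prints the device type
  // Round-trip back to the (non-Autograd) dispatch key.
  return c10::backendToDispatchKey(b) == c10::DispatchKey::CUDA ? 0 : 1;
}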
venv/lib/python3.10/site-packages/torch/include/c10/core/CPUAllocator.h
ADDED
@@ -0,0 +1,59 @@
+#pragma once
+
+#include <cstdint>
+#include <cstring>
+#include <mutex>
+#include <unordered_map>
+
+#include <c10/core/Allocator.h>
+#include <c10/macros/Export.h>
+#include <c10/util/Flags.h>
+
+// TODO: rename to c10
+C10_DECLARE_bool(caffe2_report_cpu_memory_usage);
+
+namespace c10 {
+
+using MemoryDeleter = void (*)(void*);
+
+// A helper function that is basically doing nothing.
+C10_API void NoDelete(void*);
+
+// A simple struct that is used to report C10's memory allocation,
+// deallocation status and out-of-memory events to the profiler
+class C10_API ProfiledCPUMemoryReporter {
+ public:
+  ProfiledCPUMemoryReporter() = default;
+  void New(void* ptr, size_t nbytes);
+  void OutOfMemory(size_t nbytes);
+  void Delete(void* ptr);
+
+ private:
+  std::mutex mutex_;
+  std::unordered_map<void*, size_t> size_table_;
+  size_t allocated_ = 0;
+  size_t log_cnt_ = 0;
+};
+
+C10_API ProfiledCPUMemoryReporter& profiledCPUMemoryReporter();
+
+// Get the CPU Allocator.
+C10_API at::Allocator* GetCPUAllocator();
+// Sets the CPU allocator to the given allocator: the caller gives away the
+// ownership of the pointer.
+C10_API void SetCPUAllocator(at::Allocator* alloc, uint8_t priority = 0);
+
+// Get the Default CPU Allocator
+C10_API at::Allocator* GetDefaultCPUAllocator();
+
+// Get the Default Mobile CPU Allocator
+C10_API at::Allocator* GetDefaultMobileCPUAllocator();
+
+// The CPUCachingAllocator is experimental and might disappear in the future.
+// The only place that uses it is in StaticRuntime.
+// Set the CPU Caching Allocator
+C10_API void SetCPUCachingAllocator(Allocator* alloc, uint8_t priority = 0);
+// Get the CPU Caching Allocator
+C10_API Allocator* GetCPUCachingAllocator();
+
+} // namespace c10
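A minimal usage sketch, assuming a PyTorch C++ build; c10::Allocator::allocate and c10::DataPtr come from the Allocator.h this header includes:

#include <c10/core/CPUAllocator.h>

int main() {
  // Fetch the process-wide CPU allocator (the default one unless a higher-
  // priority allocator was installed via SetCPUAllocator).
  at::Allocator* alloc = c10::GetCPUAllocator();
  // allocate() returns a DataPtr, an owning smart pointer that releases the
  // memory through the allocator's deleter when it goes out of scope.
  c10::DataPtr ptr = alloc->allocate(1024);
  return ptr.get() == nullptr ? 1 : 0;
}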
venv/lib/python3.10/site-packages/torch/include/c10/core/CompileTimeFunctionPointer.h
ADDED
@@ -0,0 +1,57 @@
+#pragma once
+
+#include <c10/util/TypeTraits.h>
+#include <type_traits>
+
+namespace c10 {
+
+/**
+ * Represent a function pointer as a C++ type.
+ * This allows using the function pointer as a type
+ * in a template and calling it from inside the template
+ * allows the compiler to inline the call because it
+ * knows the function pointer at compile time.
+ *
+ * Example 1:
+ *  int add(int a, int b) {return a + b;}
+ *  using Add = TORCH_FN_TYPE(add);
+ *  template<class Func> struct Executor {
+ *    int execute(int a, int b) {
+ *      return Func::func_ptr()(a, b);
+ *    }
+ *  };
+ *  Executor<Add> executor;
+ *  EXPECT_EQ(3, executor.execute(1, 2));
+ *
+ * Example 2:
+ *  int add(int a, int b) {return a + b;}
+ *  template<class Func> int execute(Func, int a, int b) {
+ *    return Func::func_ptr()(a, b);
+ *  }
+ *  EXPECT_EQ(3, execute(TORCH_FN(add), 1, 2));
+ */
+template <class FuncType_, FuncType_* func_ptr_>
+struct CompileTimeFunctionPointer final {
+  static_assert(
+      guts::is_function_type<FuncType_>::value,
+      "TORCH_FN can only wrap function types.");
+  using FuncType = FuncType_;
+
+  static constexpr FuncType* func_ptr() {
+    return func_ptr_;
+  }
+};
+
+template <class T>
+struct is_compile_time_function_pointer : std::false_type {};
+template <class FuncType, FuncType* func_ptr>
+struct is_compile_time_function_pointer<
+    CompileTimeFunctionPointer<FuncType, func_ptr>> : std::true_type {};
+
+} // namespace c10
+
+#define TORCH_FN_TYPE(func)                                           \
+  ::c10::CompileTimeFunctionPointer<                                  \
+      std::remove_pointer_t<std::remove_reference_t<decltype(func)>>, \
+      func>
+#define TORCH_FN(func) TORCH_FN_TYPE(func)()
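Example 2 from the comment above, expanded into a self-contained program (assuming the c10 headers are on the include path): because the callee is baked into the type as a template parameter, Func::func_ptr() is a compile-time constant and the call can be inlined.

#include <c10/core/CompileTimeFunctionPointer.h>
#include <cassert>

int add(int a, int b) {
  return a + b;
}

// Func carries the callee in its type, so the compiler sees the target of
// Func::func_ptr()(a, b) at compile time and can inline it.
template <class Func>
int execute(Func, int a, int b) {
  return Func::func_ptr()(a, b);
}

int main() {
  assert(execute(TORCH_FN(add), 1, 2) == 3);
  static_assert(
      c10::is_compile_time_function_pointer<TORCH_FN_TYPE(add)>::value, "");
  return 0;
}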
venv/lib/python3.10/site-packages/torch/include/c10/core/Contiguity.h
ADDED
@@ -0,0 +1,129 @@
+#pragma once
+#include <c10/core/SymBool.h>
+#include <c10/core/SymInt.h>
+#include <c10/util/ArrayRef.h>
+#include <c10/util/SmallVector.h>
+#include <c10/util/irange.h>
+
+#include <algorithm>
+#include <cstdint>
+
+namespace c10 {
+
+template <typename T>
+bool _compute_contiguous(ArrayRef<T> sizes, ArrayRef<T> strides, T numel) {
+  bool is_contiguous = true;
+  if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_eq(numel, 0))) {
+    return is_contiguous;
+  }
+  T z = 1;
+  // NB: make sure we do signed arithmetic
+  for (int64_t d = int64_t(sizes.size()) - 1; d >= 0; d--) {
+    const auto& size_d = sizes[d];
+    if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(size_d, 1))) {
+      if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_eq(strides[d], z))) {
+        z *= size_d;
+      } else {
+        is_contiguous = false;
+        break;
+      }
+    }
+  }
+  return is_contiguous;
+}
+
+template <typename T>
+bool _compute_channels_last_contiguous_2d(
+    ArrayRef<T> sizes,
+    ArrayRef<T> strides) {
+  // Please don't combine these code, constant array is used here to let
+  // compiler fully unroll the loop to get better performance
+  switch (sizes.size()) {
+    case 4: {
+      T expected = 1;
+      for (auto& d : {1, 3, 2, 0}) {
+        const auto& size_d = sizes[d];
+        if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(size_d, 1))) {
+          if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(strides[d], expected))) {
+            return false;
+          }
+          expected *= size_d;
+        }
+      }
+      return true;
+    }
+    // NOLINTNEXTLINE(bugprone-branch-clone)
+    case 3:
+      // TODO dim == 3 case will be enabled once it is fully tested
+      return false;
+    default:
+      return false;
+  }
+}
+
+template <typename T>
+bool _compute_channels_last_contiguous_3d(
+    ArrayRef<T> sizes,
+    ArrayRef<T> strides) {
+  // Please don't combine these code, constant array is used here to let
+  // compiler fully unroll the loop to get better performance
+  switch (sizes.size()) {
+    case 5: {
+      T expected = 1;
+      for (auto& d : {1, 4, 3, 2, 0}) {
+        const auto& size_d = sizes[d];
+        if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(size_d, 1))) {
+          if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(strides[d], expected))) {
+            return false;
+          }
+          expected *= size_d;
+        }
+      }
+      return true;
+    }
+    // NOLINTNEXTLINE(bugprone-branch-clone)
+    case 4:
+      // TODO dim == 4 case will be enabled once it is fully tested
+      return false;
+    default:
+      return false;
+  }
+}
+
+template <typename T>
+bool _compute_non_overlapping_and_dense(
+    ArrayRef<T> sizes,
+    ArrayRef<T> strides) {
+  auto dim = sizes.size();
+  if (dim == 1) {
+    return sizes[0] < 2 || strides[0] == 1;
+  }
+  SmallVector<int64_t, 5> perm;
+  perm.resize(dim);
+  for (const auto i : c10::irange(dim)) {
+    perm[i] = i;
+  }
+  // Sort by strides, leaving 0 and 1 sized dims at the end of the array
+  std::sort(perm.begin(), perm.end(), [&](int64_t a, int64_t b) {
+    if (sizes[a] < 2) {
+      return false;
+    } else if (sizes[b] < 2) {
+      return true;
+    }
+    return strides[a] < strides[b];
+  });
+  T require_stride = 1;
+  for (const auto i : c10::irange(dim)) {
+    const auto& size_perm_i = sizes[perm[i]];
+    if (size_perm_i < 2) {
+      return true;
+    }
+    if (strides[perm[i]] != require_stride) {
+      return false;
+    }
+    require_stride *= size_perm_i;
+  }
+  return true;
+}
+
+} // namespace c10
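The templates above are written against SymInt-style guards; a hypothetical plain-int64_t rendering of the same _compute_contiguous stride check (a sketch for illustration, not part of this header) looks like this:

#include <cstdint>
#include <vector>

// Walk dims from innermost to outermost; each dim of size != 1 must have a
// stride equal to the product of all faster-moving sizes seen so far.
bool compute_contiguous(const std::vector<int64_t>& sizes,
                        const std::vector<int64_t>& strides) {
  int64_t numel = 1;
  for (int64_t s : sizes) {
    numel *= s;
  }
  if (numel == 0) {
    return true;  // empty tensors count as contiguous
  }
  int64_t z = 1;
  for (int64_t d = static_cast<int64_t>(sizes.size()) - 1; d >= 0; d--) {
    if (sizes[d] != 1) {
      if (strides[d] != z) {
        return false;
      }
      z *= sizes[d];
    }
  }
  return true;
}

// E.g., a 2x3 row-major tensor has sizes {2, 3} and strides {3, 1}:
// compute_contiguous({2, 3}, {3, 1}) == true; strides {1, 1} would fail.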
venv/lib/python3.10/site-packages/torch/include/c10/core/DefaultTensorOptions.h
ADDED
@@ -0,0 +1,45 @@
+#pragma once
+
+#include <c10/core/Device.h>
+#include <c10/core/DeviceType.h>
+#include <c10/core/Layout.h>
+#include <c10/core/ScalarType.h>
+#include <c10/util/typeid.h>
+
+namespace c10 {
+
+struct TensorOptions;
+
+/// Like TensorOptions, but all fields are guaranteed to be filled.
+struct DefaultTensorOptions {
+  DefaultTensorOptions() = default;
+
+  caffe2::TypeMeta dtype() const noexcept {
+    return dtype_;
+  }
+  Device device() const noexcept {
+    return device_;
+  }
+  Layout layout() const noexcept {
+    return layout_;
+  }
+  bool requires_grad() const noexcept {
+    return requires_grad_;
+  }
+
+  // Defined in TensorOptions.h
+  inline DefaultTensorOptions& merge(const TensorOptions& options);
+
+ private:
+  caffe2::TypeMeta dtype_ = caffe2::TypeMeta::Make<float>(); // 64-bit
+  Device device_ = at::kCPU; // 32-bit
+  Layout layout_ = at::kStrided; // 8-bit
+  bool requires_grad_ = false; // 8-bit
+};
+
+inline const DefaultTensorOptions& getDefaultTensorOptions() {
+  static const auto options = DefaultTensorOptions();
+  return options;
+}
+
+} // namespace c10
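A small sketch (assuming the c10 headers) confirming the defaults the struct declares: float dtype, CPU device, strided layout, no grad.

#include <c10/core/DefaultTensorOptions.h>
#include <cassert>

int main() {
  const c10::DefaultTensorOptions& opts = c10::getDefaultTensorOptions();
  assert(opts.dtype() == caffe2::TypeMeta::Make<float>());
  assert(opts.device() == c10::Device(c10::kCPU));
  assert(opts.layout() == c10::kStrided);
  assert(!opts.requires_grad());
  return 0;
}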
venv/lib/python3.10/site-packages/torch/include/c10/core/DeviceGuard.h
ADDED
@@ -0,0 +1,199 @@
+#pragma once
+
+#include <c10/core/Device.h>
+#include <c10/core/impl/DeviceGuardImplInterface.h>
+#include <c10/core/impl/InlineDeviceGuard.h>
+#include <c10/core/impl/VirtualGuardImpl.h>
+#include <c10/util/Optional.h>
+
+namespace c10 {
+
+/// RAII guard that sets a certain default device in its constructor, and
+/// changes it back to the device that was originally active upon destruction.
+///
+/// The device is always reset to the one that was active at the time of
+/// construction of the guard. Even if you `set_device` after construction, the
+/// destructor will still reset the device to the one that was active at
+/// construction time.
+///
+/// This device guard does NOT have an uninitialized state; it is guaranteed
+/// to reset a device on exit. If you are in a situation where you *might*
+/// want to setup a guard (i.e., are looking for the moral equivalent
+/// of optional<DeviceGuard>), see OptionalDeviceGuard.
+class DeviceGuard {
+ public:
+  /// No default constructor; see Note [Omitted default constructor from RAII]
+  explicit DeviceGuard() = delete;
+
+  /// Set the current device to the passed Device.
+  explicit DeviceGuard(Device device) : guard_(device) {}
+
+  /// This constructor is for testing only.
+  explicit DeviceGuard(
+      Device device,
+      const impl::DeviceGuardImplInterface* impl)
+      : guard_(device, impl) {}
+
+  /// Copy is disallowed
+  DeviceGuard(const DeviceGuard&) = delete;
+  DeviceGuard& operator=(const DeviceGuard&) = delete;
+
+  /// Move is disallowed, as DeviceGuard does not have an uninitialized state,
+  /// which is required for moves on types with nontrivial destructors.
+  DeviceGuard(DeviceGuard&& other) = delete;
+  DeviceGuard& operator=(DeviceGuard&& other) = delete;
+
+  /// Sets the device to the given one. The specified device must be consistent
+  /// with the device type originally specified during guard construction.
+  ///
+  /// TODO: The consistency check here is inconsistent with StreamGuard's
+  /// behavior with set_stream, where a stream on a different device than
+  /// the original one isn't an error; we just reset the stream and then
+  /// switch devices.
+  void reset_device(at::Device device) {
+    guard_.reset_device(device);
+  }
+
+  /// This method is for testing only.
+  void reset_device(
+      at::Device device,
+      const impl::DeviceGuardImplInterface* impl) {
+    guard_.reset_device(device, impl);
+  }
+
+  /// Sets the device index to the given one. The device type is inferred
+  /// from the original device type the guard was constructed with.
+  void set_index(DeviceIndex index) {
+    guard_.set_index(index);
+  }
+
+  /// Returns the device that was set at the time the guard was constructed.
+  Device original_device() const {
+    return guard_.original_device();
+  }
+
+  /// Returns the most recent device that was set using this device guard,
+  /// either from construction, or via set_device.
+  Device current_device() const {
+    return guard_.current_device();
+  }
+
+ private:
+  impl::InlineDeviceGuard<impl::VirtualGuardImpl> guard_;
+};
+
+/**
+ * A OptionalDeviceGuard is an RAII class that sets a device to some value on
+ * initialization, and resets the device to its original value on destruction.
+ * Morally, a OptionalDeviceGuard is equivalent to optional<DeviceGuard>, but
+ * with extra constructors and methods as appropriate.
+ *
+ * Besides its obvious use (optionally applying a DeviceGuard),
+ * OptionalDeviceGuard is often also used for the following idiom:
+ *
+ *    OptionalDeviceGuard g;
+ *    for (const auto& t : tensors) {
+ *      g.set_device(t.device());
+ *      do_something_with(t);
+ *    }
+ *
+ * This usage is marginally more efficient than constructing a DeviceGuard every
+ * iteration of the for loop, as it avoids an unnecessary device reset.
+ *
+ * Unlike DeviceGuard, a OptionalDeviceGuard may be uninitialized. This occurs
+ * when you use the nullary constructor, or pass a nullopt to the constructor.
+ * Uninitialized OptionalDeviceGuards do *nothing*; they do not know what the
+ * original device was and they do not reset on destruction. This is why
+ * original_device() and current_device() return optional<Device> rather than
+ * Device (as they do in DeviceGuard), and also is why we didn't just
+ * provide OptionalDeviceGuard by default and hide DeviceGuard from users.
+ *
+ * The semantics of an OptionalDeviceGuard are exactly explained by thinking
+ * of it as an optional<DeviceGuard>. In particular, an initialized
+ * OptionalDeviceGuard doesn't restore device to its value at construction; it
+ * restores device to its value *at initialization*. So if you have the
+ * program:
+ *
+ *     setDevice(1);
+ *     OptionalDeviceGuard g;
+ *     setDevice(2);
+ *     g.reset_device(Device(DeviceType::CUDA, 3)); // initializes!
+ *
+ * On destruction, g will reset device to 2, rather than 1.
+ *
+ * An uninitialized OptionalDeviceGuard is distinct from a (initialized)
+ * DeviceGuard whose original_device_ and current_device_ match, since the
+ * DeviceGuard will still reset the device to original_device_.
+ */
+class OptionalDeviceGuard {
+ public:
+  /// Create an uninitialized guard. Set the guard later using reset_device.
+  explicit OptionalDeviceGuard() = default;
+
+  /// Initialize the guard, setting the current device to the passed Device.
+  explicit OptionalDeviceGuard(Device device) : guard_(device) {}
+
+  /// Initialize the guard if a Device is passed; otherwise leave the
+  /// guard uninitialized.
+  explicit OptionalDeviceGuard(optional<Device> device) : guard_(device) {}
+
+  /// Constructor for testing only.
+  explicit OptionalDeviceGuard(
+      Device device,
+      const impl::DeviceGuardImplInterface* impl)
+      : guard_(device, impl) {}
+
+  /// Copy is disallowed
+  OptionalDeviceGuard(const OptionalDeviceGuard&) = delete;
+  OptionalDeviceGuard& operator=(const OptionalDeviceGuard&) = delete;
+
+  /// Move is disallowed
+  /// See Note [Explicit initialization of optional fields]
+  /// and // Note [Move construction for RAII guards is tricky]
+  /// for rationale.
+  OptionalDeviceGuard(OptionalDeviceGuard&& other) = delete;
+  OptionalDeviceGuard& operator=(OptionalDeviceGuard&& other) = delete;
+
+  /// Sets the device to the given one. The specified device must be consistent
+  /// with the device type originally specified during guard construction.
+  void reset_device(at::Device device) {
+    guard_.reset_device(device);
+  }
+
+  /// For testing only
+  void reset_device(
+      at::Device device,
+      const impl::DeviceGuardImplInterface* impl) {
+    guard_.reset_device(device, impl);
+  }
+
+  /// Returns the device that was set at the time the guard was constructed.
+  optional<Device> original_device() const {
+    return guard_.original_device();
+  }
+
+  /// Returns the most recent device that was set using this device guard,
+  /// either from construction, or via reset_device.
+  optional<Device> current_device() const {
+    return guard_.current_device();
+  }
+
+ private:
+  impl::InlineOptionalDeviceGuard<impl::VirtualGuardImpl> guard_{};
+};
+
+// Note [Whither the DeviceGuard boilerplate]
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+// Design note: in principle, we could avoid these wrappers using:
+//
+//    using DeviceGuard = impl::InlineDeviceGuard<impl::VirtualGuardImpl>;
+//    using OptionalDeviceGuard =
+//        impl::InlineOptionalDeviceGuard<impl::VirtualGuardImpl>;
+//
+// But the error messages are worse, and our users can't just look at the
+// header file to find out what's going on. Furthermore, for specializations
+// like CUDAStreamGuard, it can be profitable to replace some interfaces with
+// refined types (e.g., return CUDAStream instead of Stream). So, we eat
+// the boilerplate and write out the API explicitly.
+
+} // namespace c10
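A sketch of both guards in use, assuming a build where the relevant device's guard implementation is registered (e.g. a CUDA build for CUDA devices):

#include <c10/core/DeviceGuard.h>

void run_on(c10::Device device) {
  // Switch to `device` for this scope; the previously active device is
  // restored when the guard is destroyed, even on early return or exception.
  c10::DeviceGuard guard(device);
  // ... launch work on `device` here ...
}

// The loop idiom from the OptionalDeviceGuard comment above, written against
// its actual reset_device method: the guard initializes lazily on first use
// and restores the original device exactly once, after the loop.
template <class Tensors, class Fn>
void for_each_on_own_device(const Tensors& tensors, Fn do_something_with) {
  c10::OptionalDeviceGuard g;
  for (const auto& t : tensors) {
    g.reset_device(t.device());
    do_something_with(t);
  }
}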
venv/lib/python3.10/site-packages/torch/include/c10/core/DeviceType.h
ADDED
@@ -0,0 +1,123 @@
+#pragma once
+
+// This is directly synchronized with caffe2/proto/caffe2.proto, but
+// doesn't require me to figure out how to get Protobuf headers into
+// ATen/core (which would require a lot more build system hacking.)
+// If you modify me, keep me synchronized with that file.
+
+#include <c10/macros/Export.h>
+
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <ostream>
+#include <string>
+
+namespace c10 {
+
+// These contains all device types that also have a BackendComponent
+// and therefore participate in per-backend functionality dispatch keys.
+// This is most backends except PrivateUse2 and PrivateUse3
+#define C10_FORALL_BACKEND_DEVICE_TYPES(_, extra) \
+  _(CPU, extra)                                   \
+  _(CUDA, extra)                                  \
+  _(HIP, extra)                                   \
+  _(XLA, extra)                                   \
+  _(MPS, extra)                                   \
+  _(IPU, extra)                                   \
+  _(XPU, extra)                                   \
+  _(HPU, extra)                                   \
+  _(VE, extra)                                    \
+  _(Lazy, extra)                                  \
+  _(Meta, extra)                                  \
+  _(MTIA, extra)                                  \
+  _(PrivateUse1, extra)
+
+enum class DeviceType : int8_t {
+  CPU = 0,
+  CUDA = 1, // CUDA.
+  MKLDNN = 2, // Reserved for explicit MKLDNN
+  OPENGL = 3, // OpenGL
+  OPENCL = 4, // OpenCL
+  IDEEP = 5, // IDEEP.
+  HIP = 6, // AMD HIP
+  FPGA = 7, // FPGA
+  ORT = 8, // ONNX Runtime / Microsoft
+  XLA = 9, // XLA / TPU
+  Vulkan = 10, // Vulkan
+  Metal = 11, // Metal
+  XPU = 12, // XPU
+  MPS = 13, // MPS
+  Meta = 14, // Meta (tensors with no data)
+  HPU = 15, // HPU / HABANA
+  VE = 16, // SX-Aurora / NEC
+  Lazy = 17, // Lazy Tensors
+  IPU = 18, // Graphcore IPU
+  MTIA = 19, // Meta training and inference devices
+  PrivateUse1 = 20, // PrivateUse1 device
+  // NB: If you add more devices:
+  //  - Change the implementations of DeviceTypeName and isValidDeviceType
+  //    in DeviceType.cpp
+  //  - Change the number below
+  COMPILE_TIME_MAX_DEVICE_TYPES = 21,
+};
+
+constexpr DeviceType kCPU = DeviceType::CPU;
+constexpr DeviceType kCUDA = DeviceType::CUDA;
+constexpr DeviceType kHIP = DeviceType::HIP;
+constexpr DeviceType kFPGA = DeviceType::FPGA;
+constexpr DeviceType kORT = DeviceType::ORT;
+constexpr DeviceType kXLA = DeviceType::XLA;
+constexpr DeviceType kMPS = DeviceType::MPS;
+constexpr DeviceType kMeta = DeviceType::Meta;
+constexpr DeviceType kVulkan = DeviceType::Vulkan;
+constexpr DeviceType kMetal = DeviceType::Metal;
+constexpr DeviceType kXPU = DeviceType::XPU;
+constexpr DeviceType kHPU = DeviceType::HPU;
+constexpr DeviceType kVE = DeviceType::VE;
+constexpr DeviceType kLazy = DeviceType::Lazy;
+constexpr DeviceType kIPU = DeviceType::IPU;
+constexpr DeviceType kMTIA = DeviceType::MTIA;
+constexpr DeviceType kPrivateUse1 = DeviceType::PrivateUse1;
+
+// define explicit int constant
+constexpr int COMPILE_TIME_MAX_DEVICE_TYPES =
+    static_cast<int>(DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES);
+
+static_assert(
+    COMPILE_TIME_MAX_DEVICE_TYPES <= 21,
+    "Hey! You seem to be adding a lot of new DeviceTypes. The intent was "
+    "for this constant to reflect the actual number of DeviceTypes we support "
+    "in PyTorch; it's important that this number is not too large as we "
+    "use this to allocate stack arrays in some places in our code. If you "
+    "are indeed just adding the 20th device type, feel free to change "
+    "the check to 32; but if you are adding some sort of extensible device "
+    "types registration, please be aware that you are affecting code that "
+    "thinks this number is small. Try auditing uses of this constant.");
+
+C10_API std::string DeviceTypeName(DeviceType d, bool lower_case = false);
+
+C10_API bool isValidDeviceType(DeviceType d);
+
+C10_API std::ostream& operator<<(std::ostream& stream, DeviceType type);
+
+C10_API void register_privateuse1_backend(const std::string& backend_name);
+C10_API std::string get_privateuse1_backend(bool lower_case = true);
+
+C10_API bool is_privateuse1_backend_registered();
+
+} // namespace c10
+
+namespace std {
+template <>
+struct hash<c10::DeviceType> {
+  std::size_t operator()(c10::DeviceType k) const {
+    return std::hash<int>()(static_cast<int>(k));
+  }
+};
+} // namespace std
+
+namespace torch {
+// NOLINTNEXTLINE(misc-unused-using-decls)
+using c10::DeviceType;
+} // namespace torch
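A small usage sketch, assuming the c10 headers; the exact name strings come from DeviceTypeName's implementation in DeviceType.cpp:

#include <c10/core/DeviceType.h>
#include <iostream>

int main() {
  // DeviceTypeName returns the enum's name; lower_case=true lower-cases it,
  // which matches what operator<< prints.
  std::cout << c10::DeviceTypeName(c10::kCUDA) << "\n";
  std::cout << c10::DeviceTypeName(c10::kCUDA, /*lower_case=*/true) << "\n";
  std::cout << std::boolalpha << c10::isValidDeviceType(c10::kMPS) << "\n";
  // The std::hash specialization above lets DeviceType key unordered
  // containers; it simply hashes the underlying integer value.
  return std::hash<c10::DeviceType>()(c10::kCPU) == std::hash<int>()(0) ? 0 : 1;
}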
venv/lib/python3.10/site-packages/torch/include/c10/core/GeneratorImpl.h
ADDED
@@ -0,0 +1,107 @@
+#pragma once
+
+#include <cstdint>
+#include <mutex>
+
+#include <c10/core/Device.h>
+#include <c10/core/DispatchKeySet.h>
+#include <c10/core/TensorImpl.h>
+#include <c10/macros/Export.h>
+#include <c10/util/intrusive_ptr.h>
+#include <c10/util/python_stub.h>
+
+/**
+ * Note [Generator]
+ * ~~~~~~~~~~~~~~~~
+ * A Pseudo Random Number Generator (PRNG) is an engine that uses an algorithm
+ * to generate a seemingly random sequence of numbers, that may later be used
+ * in creating a random distribution. Such an engine almost always maintains a
+ * state and requires a seed to start off the creation of random numbers. Often
+ * times, users have found it beneficial to be able to explicitly create,
+ * retain, and destroy PRNG states and also be able to have control over the
+ * seed value.
+ *
+ * A Generator in ATen gives users the ability to read, write and modify a PRNG
+ * engine. For instance, it does so by letting users seed a PRNG engine, fork
+ * the state of the engine, etc.
+ *
+ * By default, there is one generator per device, and a device's generator is
+ * lazily created. A user can use the torch.Generator() api to create their own
+ * generator. Currently torch.Generator() can only create a CPUGeneratorImpl.
+ */
+
+/**
+ * Note [Acquire lock when using random generators]
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * Generator and its derived classes are NOT thread-safe. Please note that most
+ * of the places where we have inserted locking for generators are historically
+ * based, and we haven't actually checked that everything is truly thread safe
+ * (and it probably isn't). Please use the public mutex_ when using any methods
+ * from these classes, except for the read-only methods. You can learn about the
+ * usage by looking into the unittests (aten/src/ATen/cpu_generator_test.cpp)
+ * and other places where we have used lock_guard.
+ *
+ * TODO: Look into changing the threading semantics of Generators in ATen (e.g.,
+ * making them non-thread safe and instead making the generator state
+ * splittable, to accommodate forks into other threads).
+ */
+
+namespace c10 {
+
+// The default seed is selected to be a large number
+// with good distribution of 0s and 1s in bit representation
+constexpr uint64_t default_rng_seed_val = 67280421310721;
+
+struct C10_API GeneratorImpl : public c10::intrusive_ptr_target {
+  // Constructors
+  GeneratorImpl(Device device_in, DispatchKeySet key_set);
+
+  // Delete all copy and move assignment in favor of clone()
+  // method
+  GeneratorImpl(const GeneratorImpl& other) = delete;
+  GeneratorImpl(GeneratorImpl&& other) = delete;
+  GeneratorImpl& operator=(const GeneratorImpl& other) = delete;
+
+  ~GeneratorImpl() override = default;
+  c10::intrusive_ptr<GeneratorImpl> clone() const;
+
+  // Common methods for all generators
+  virtual void set_current_seed(uint64_t seed) = 0;
+  virtual void set_offset(uint64_t offset) = 0;
+  virtual uint64_t get_offset() const = 0;
+  virtual uint64_t current_seed() const = 0;
+  virtual uint64_t seed() = 0;
+  virtual void set_state(const c10::TensorImpl& new_state) = 0;
+  virtual c10::intrusive_ptr<c10::TensorImpl> get_state() const = 0;
+  Device device() const;
+
+  // See Note [Acquire lock when using random generators]
+  std::mutex mutex_;
+
+  DispatchKeySet key_set() const {
+    return key_set_;
+  }
+
+  inline void set_pyobj(PyObject* pyobj) noexcept {
+    pyobj_ = pyobj;
+  }
+
+  inline PyObject* pyobj() const noexcept {
+    return pyobj_;
+  }
+
+ protected:
+  Device device_;
+  DispatchKeySet key_set_;
+  PyObject* pyobj_ = nullptr;
+
+  virtual GeneratorImpl* clone_impl() const = 0;
+};
+
+namespace detail {
+
+C10_API uint64_t getNonDeterministicRandom(bool is_cuda = false);
+
+} // namespace detail
+
+} // namespace c10
ADDED
@@ -0,0 +1,218 @@
|
+#pragma once
+
+#include <c10/macros/Export.h>
+#include <c10/util/ArrayRef.h>
+#include <c10/util/Exception.h>
+#include <c10/util/Optional.h>
+#include <c10/util/intrusive_ptr.h>
+#include <cstdint>
+#include <ostream>
+#include <string>
+
+namespace c10 {
+
+class SymNodeImpl;
+using SymNode = c10::intrusive_ptr<SymNodeImpl>;
+
+// When you add a method, you also need to edit
+// torch/csrc/jit/python/init.cpp
+// torch/csrc/utils/python_symnode.h
+// c10/core/ConstantSymNodeImpl.h
+class C10_API SymNodeImpl : public c10::intrusive_ptr_target {
+ public:
+  ~SymNodeImpl() override = default;
+
+  template <typename T>
+  c10::intrusive_ptr<T> dyn_cast() const {
+    return c10::intrusive_ptr<T>::reclaim_copy(dynamic_cast<T*>(this));
+  }
+
+  // these could be pure virtual when we implement LTC versions
+  virtual bool is_int() {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual bool is_bool() {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual bool is_float() {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual bool is_nested_int() const {
+    return false;
+  };
+  virtual SymNode add(const SymNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode sub(const SymNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode mul(const SymNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode truediv(const SymNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode pow(const SymNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode floordiv(const SymNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode mod(const SymNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode eq(const SymNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode ne(const SymNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode gt(const SymNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode lt(const SymNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode le(const SymNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode ge(const SymNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode ceil() {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode floor() {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode neg() {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode sym_min(const SymNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode sym_max(const SymNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode sym_or(const SymNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode sym_and(const SymNode& other) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode sym_not() {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode sym_ite(const SymNode& then_val, const SymNode& else_val) {
+    TORCH_CHECK(false, "NYI");
+  };
+  // NB: self is ignored here, only the arguments are used
+  virtual SymNode is_contiguous(
+      ArrayRef<SymNode> sizes,
+      ArrayRef<SymNode> strides) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode is_channels_last_contiguous_2d(
+      ArrayRef<SymNode> sizes,
+      ArrayRef<SymNode> strides) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode is_channels_last_contiguous_3d(
+      ArrayRef<SymNode> sizes,
+      ArrayRef<SymNode> strides) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode is_channels_last_strides_2d(
+      ArrayRef<SymNode> sizes,
+      ArrayRef<SymNode> strides) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode is_channels_last_strides_3d(
+      ArrayRef<SymNode> sizes,
+      ArrayRef<SymNode> strides) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode is_non_overlapping_and_dense(
+      ArrayRef<SymNode> sizes,
+      ArrayRef<SymNode> strides) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode clone() {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode sym_float() {
+    TORCH_CHECK(false, "NYI");
+  }
+  virtual SymNode wrap_int(int64_t num) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode wrap_float(double num) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual SymNode wrap_bool(bool num) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual int64_t guard_int(const char* file, int64_t line) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual bool guard_bool(const char* file, int64_t line) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual double guard_float(const char* file, int64_t line) {
+    TORCH_CHECK(false, "NYI");
+  };
+  virtual bool guard_size_oblivious(const char* file, int64_t line) {
+    // No improvement for unbacked SymBools by default, replace this
+    // with a better implementation!
+    return guard_bool(file, line);
+  }
+  virtual bool expect_true(const char* file, int64_t line) {
+    // No improvement for unbacked SymBools by default, replace this
+    // with a better implementation!
+    return guard_bool(file, line);
+  };
+  virtual bool expect_size(const char* file, int64_t line) {
virtual bool expect_size(const char* file, int64_t line) {
|
175 |
+
// No improvement for unbacked SymInts by default, replace this
|
176 |
+
// with a better implementation!
|
177 |
+
return ge(wrap_int(0))->guard_bool(file, line);
|
178 |
+
};
|
179 |
+
virtual int64_t int_() {
|
180 |
+
TORCH_CHECK(false, "NYI");
|
181 |
+
};
|
182 |
+
virtual bool bool_() {
|
183 |
+
TORCH_CHECK(false, "NYI");
|
184 |
+
};
|
185 |
+
virtual bool has_hint() {
|
186 |
+
TORCH_CHECK(false, "NYI");
|
187 |
+
};
|
188 |
+
virtual std::string str() {
|
189 |
+
TORCH_CHECK(false, "NYI");
|
190 |
+
};
|
191 |
+
virtual c10::optional<int64_t> nested_int() {
|
192 |
+
return c10::nullopt;
|
193 |
+
}
|
194 |
+
virtual c10::optional<int64_t> nested_int_coeff() {
|
195 |
+
return c10::nullopt;
|
196 |
+
}
|
197 |
+
virtual c10::optional<int64_t> constant_int() {
|
198 |
+
return c10::nullopt;
|
199 |
+
}
|
200 |
+
virtual c10::optional<bool> constant_bool() {
|
201 |
+
return c10::nullopt;
|
202 |
+
}
|
203 |
+
virtual c10::optional<int64_t> maybe_as_int() {
|
204 |
+
return c10::nullopt;
|
205 |
+
}
|
206 |
+
virtual bool is_constant() {
|
207 |
+
return false;
|
208 |
+
}
|
209 |
+
virtual bool is_symbolic() {
|
210 |
+
return true;
|
211 |
+
}
|
212 |
+
std::ostream& operator<<(std::ostream& os) {
|
213 |
+
os << str();
|
214 |
+
return os;
|
215 |
+
}
|
216 |
+
};
|
217 |
+
|
218 |
+
} // namespace c10
|
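For orientation, a minimal sketch (not part of this diff) of how a backend might subclass SymNodeImpl: everything below except the c10 types is hypothetical, only a constant-integer node is implemented, and all other operations keep the NYI defaults above.

#include <c10/core/SymNodeImpl.h>
#include <string>

namespace example {

// Hypothetical node wrapping a known 64-bit constant.
class ConstIntNode : public c10::SymNodeImpl {
 public:
  explicit ConstIntNode(int64_t v) : value_(v) {}
  bool is_int() override { return true; }
  bool is_bool() override { return false; }
  bool is_float() override { return false; }
  int64_t int_() override { return value_; }
  c10::optional<int64_t> constant_int() override { return value_; }
  c10::optional<int64_t> maybe_as_int() override { return value_; }
  bool is_constant() override { return true; }
  bool is_symbolic() override { return false; }
  std::string str() override { return std::to_string(value_); }

 private:
  int64_t value_;
};

} // namespace example

// Usage: a SymNode is just an intrusive_ptr to the impl.
//   c10::SymNode n = c10::make_intrusive<example::ConstIntNode>(42);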
venv/lib/python3.10/site-packages/torch/include/c10/core/UndefinedTensorImpl.h
ADDED
@@ -0,0 +1,42 @@
#pragma once

#include <c10/core/MemoryFormat.h>
#include <c10/core/SymIntArrayRef.h>
#include <c10/core/TensorImpl.h>
#include <c10/macros/Export.h>
#include <c10/util/ArrayRef.h>
#include <cstdint>

namespace c10 {

struct C10_API UndefinedTensorImpl final : public TensorImpl {
 public:
  // Without this, we get:
  //   error: identifier "at::UndefinedTensorImpl::_singleton" is undefined in
  //   device code
  // (ostensibly because the constexpr tricks MSVC into trying to compile this
  // function for device as well).
#ifdef _WIN32
  static inline TensorImpl* singleton() {
#else
  static constexpr inline TensorImpl* singleton() {
#endif
    return &_singleton;
  }
#ifdef DEBUG
  bool has_storage() const override;
#endif
  void set_storage_offset(int64_t offset) override;

 protected:
  bool is_contiguous_custom(MemoryFormat format) const override;
  IntArrayRef strides_custom() const override;
  SymIntArrayRef sym_strides_custom() const override;

 private:
  UndefinedTensorImpl();
  static UndefinedTensorImpl _singleton;
  const char* tensorimpl_type_name() const override;
};

} // namespace c10
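A short, hedged note on consumption: definedness is a pointer-identity test against the singleton, roughly as below (the helper name is made up).

#include <c10/core/UndefinedTensorImpl.h>

// Hypothetical helper: an "undefined" tensor is exactly the one whose
// impl pointer equals the shared singleton, so the check is one compare.
inline bool impl_is_defined(const c10::TensorImpl* impl) {
  return impl != c10::UndefinedTensorImpl::singleton();
}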
venv/lib/python3.10/site-packages/torch/include/c10/core/impl/COW.h
ADDED
@@ -0,0 +1,32 @@
#pragma once

#include <c10/macros/Macros.h>
#include <c10/util/intrusive_ptr.h>

namespace c10 {
struct StorageImpl;
class DataPtr;
}; // namespace c10

namespace c10::impl::cow {

// Creates a Copy-on-write (COW) clone of the given storage. This will also
// convert the given storage into a COW storage if it is not COW already.
//
// Converting the storage into a COW storage will not be successful if the
// storage's DataPtr has some context (`DataPtr::get_context()`) which is not
// equal to the data pointer (`DataPtr::get()`). In this case, a nullptr is
// returned.
C10_API c10::intrusive_ptr<StorageImpl> lazy_clone_storage(
    StorageImpl& storage);

// Check if a storage has a simple DataPtr with no abnormal context
C10_API bool has_simple_data_ptr(const c10::StorageImpl& storage);

// Check if a DataPtr is COW
C10_API bool is_cow_data_ptr(const c10::DataPtr& data_ptr);

// Eagerly copies a COW storage's data, turning it into a non-COW storage.
C10_API void materialize_cow_storage(StorageImpl& storage);

} // namespace c10::impl::cow
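A sketch of the intended call pattern, assuming a StorageImpl `src` the caller already holds; this is illustrative glue, not code from this diff.

#include <c10/core/StorageImpl.h>
#include <c10/core/impl/COW.h>

// Lazily clone, then (for demonstration) force the copy. A nullptr
// result means src's DataPtr carried a foreign context and could not be
// converted to COW, per the comment on lazy_clone_storage above.
void demo_cow(c10::StorageImpl& src) {
  c10::intrusive_ptr<c10::StorageImpl> cloned =
      c10::impl::cow::lazy_clone_storage(src);
  if (cloned) {
    // src and cloned now share one COW context; materializing makes
    // cloned own a private copy again.
    c10::impl::cow::materialize_cow_storage(*cloned);
  }
}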
venv/lib/python3.10/site-packages/torch/include/c10/core/impl/COWDeleter.h
ADDED
@@ -0,0 +1,66 @@
#pragma once

#include <c10/macros/Export.h>
#include <c10/util/UniqueVoidPtr.h>

#include <atomic>
#include <cstdint>
#include <memory>
#include <shared_mutex>
#include <variant>

namespace c10::impl::cow {

// A COWDeleterContext object is used as the `ctx` argument for DataPtr
// to implement a Copy-on-write (COW) DataPtr.
class C10_API COWDeleterContext {
 public:
  // Creates an instance, holding the pair of data and original
  // deleter.
  //
  // Note that the deleter will only be called in our destructor if
  // the last reference to this goes away without getting
  // materialized.
  explicit COWDeleterContext(std::unique_ptr<void, DeleterFnPtr> data);

  // Increments the current refcount.
  void increment_refcount();

  // See README.md in this directory to understand the locking
  // strategy.

  // Represents a reference to the context.
  //
  // This is returned by decrement_refcount to allow the caller to
  // copy the data under the shared lock.
  using NotLastReference = std::shared_lock<std::shared_mutex>;

  // Represents the last reference to the context.
  //
  // This will be returned by decrement_refcount when it is the last
  // reference remaining and after any pending copies have completed.
  using LastReference = std::unique_ptr<void, DeleterFnPtr>;

  // Decrements the refcount, returning a handle indicating what to
  // do with it.
  std::variant<NotLastReference, LastReference> decrement_refcount();

 private:
  // The destructor is hidden, this should only ever be used within
  // UniqueVoidPtr using cow::delete_context as the deleter.
  ~COWDeleterContext();

  std::shared_mutex mutex_;
  std::unique_ptr<void, DeleterFnPtr> data_;
  std::atomic<std::int64_t> refcount_ = 1;
};

// `cow_deleter` is used as the `ctx_deleter` for DataPtr to implement a COW
// DataPtr.
//
// Warning: This should only be called on a pointer to a COWDeleterContext that
// was allocated on the heap with `new`, because when the refcount reaches 0,
// the context is deleted with `delete`.
C10_API void cow_deleter(void* ctx);

} // namespace c10::impl::cow
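The variant returned by decrement_refcount() encodes the caller's obligations; a hedged sketch of handling it (the free-function name is invented):

#include <c10/core/impl/COWDeleter.h>
#include <utility>
#include <variant>

// Hypothetical release helper. `ctx` must be heap-allocated, per the
// warning on cow_deleter above.
void release_ref(c10::impl::cow::COWDeleterContext* ctx) {
  using Ctx = c10::impl::cow::COWDeleterContext;
  auto ref = ctx->decrement_refcount();
  if (std::holds_alternative<Ctx::LastReference>(ref)) {
    // Last reference: we now own the data; dropping the unique_ptr runs
    // the original deleter (the context itself is gone at this point).
    Ctx::LastReference data = std::move(std::get<Ctx::LastReference>(ref));
    (void)data;
  }
  // Otherwise `ref` holds a NotLastReference (a shared_lock) that is
  // released when it goes out of scope.
}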
venv/lib/python3.10/site-packages/torch/include/c10/core/impl/DeviceGuardImplInterface.h
ADDED
@@ -0,0 +1,337 @@
#pragma once

#include <c10/core/Device.h>
#include <c10/core/DeviceType.h>
#include <c10/core/Stream.h>
#include <c10/util/Exception.h>

// Just for C10_ANONYMOUS_VARIABLE
#include <c10/util/Registry.h>

#include <atomic>

namespace c10 {

// Forward declaration
class DataPtr;

/**
 * Flags defining the behavior of events.
 *
 * PYTORCH_DEFAULT and BACKEND_DEFAULT are valid for all backends. The
 * BACKEND_DEFAULT is what a particular backend would select if no
 * flags were given. PYTORCH_DEFAULT is PyTorch's framework default
 * choice for events on that backend, which may not be the same. For example,
 * when PyTorch creates a CUDA event it sets the flag
 * CUDA_EVENT_DISABLING_TIMING by default to improve performance.
 *
 * The mapping of PYTORCH_DEFAULT and BACKEND_DEFAULT is done by each
 * backend implementation. Backend-specific flags, like CUDA_EVENT_DEFAULT,
 * should map one-to-one with actual event flags for those backends.
 */
enum class EventFlag {
  PYTORCH_DEFAULT,
  BACKEND_DEFAULT,
  // CUDA flags
  CUDA_EVENT_DEFAULT,
  CUDA_EVENT_DISABLE_TIMING, // PyTorch-default for CUDA
  // HIP flags
  HIP_EVENT_DEFAULT,
  HIP_EVENT_DISABLE_TIMING, // PyTorch-default for HIP
  // FOR TESTING ONLY
  INVALID
};

namespace impl {

/**
 * DeviceGuardImplInterface represents the virtual interface which provides
 * functionality to provide an RAII class for device and stream switching,
 * via DeviceGuard. Every distinct device type, e.g., CUDA and HIP, is
 * expected to implement and register an implementation of this interface.
 * All classes which inherit from DeviceGuardImplInterface should be declared
 * 'final'.
 *
 * This class exists because we provide a unified interface for performing
 * device guards via DeviceGuard, but we cannot assume that we have actually
 * compiled against the, e.g., CUDA library, which actually implements
 * this guard functionality. In this case, a dynamic dispatch is required
 * to cross the library boundary.
 *
 * If possible, you should directly use implementations of this interface;
 * those uses will be devirtualized.
 */
struct C10_API DeviceGuardImplInterface {
  DeviceGuardImplInterface() = default;
  DeviceGuardImplInterface(const DeviceGuardImplInterface&) = default;
  DeviceGuardImplInterface& operator=(const DeviceGuardImplInterface&) =
      default;
  DeviceGuardImplInterface(DeviceGuardImplInterface&&) noexcept = default;
  DeviceGuardImplInterface& operator=(DeviceGuardImplInterface&&) noexcept =
      default;

  /**
   * Return the type of device managed by this guard implementation.
   */
  virtual DeviceType type() const = 0;

  /**
   * Set the current device to Device, and return the previous Device.
   */
  virtual Device exchangeDevice(Device) const = 0;
  // NB: Implementations of exchangeDevice can be a bit boilerplatey. You might
  // consider replacing exchangeDevice with a non-virtual function with a baked
  // in implementation; however, note that this will triple the number of
  // virtual calls (when you implement exchangeDevice in a final subclass,
  // the compiler gets to devirtualize everything; it won't do that if you don't
  // define it in the subclass!) A common way to solve this problem is to use
  // some sort of CRTP; however, we can't template DeviceGuardImplInterface
  // since we really *do* need it to be virtual. A little boilerplate seems
  // easiest to explain. (Another way around this problem is to provide inline
  // functions that provide the default implementations, but this seems a little
  // hard to explain. In any case, we're only going to have on the order of ten
  // implementations of this anyway.)

  /**
   * Get the current device.
   */
  virtual Device getDevice() const = 0;

  /**
   * Set the current device to Device.
   */
  virtual void setDevice(Device) const = 0;

  /**
   * Set the current device to Device, without checking for errors
   * (so, e.g., this can be called from a destructor).
   */
  virtual void uncheckedSetDevice(Device) const noexcept = 0;

  /**
   * Get the current stream for a given device.
   */
  virtual Stream getStream(Device) const noexcept = 0;

  /**
   * Get the default stream for a given device.
   */
  virtual Stream getDefaultStream(Device) const {
    TORCH_CHECK(false, "Backend doesn't support acquiring a default stream.")
  }

  /**
   * Get a stream from the global pool for a given device.
   */
  virtual Stream getStreamFromGlobalPool(Device, bool isHighPriority = false)
      const {
    (void)isHighPriority; // Suppress unused variable warning
    TORCH_CHECK(false, "Backend doesn't support acquiring a stream from pool.")
  }

  /**
   * Set a stream to be the thread local current stream for its device.
   * Return the previous stream for that device. You are NOT required
   * to set the current device to match the device of this stream.
   */
  virtual Stream exchangeStream(Stream) const noexcept = 0;

  /**
   * Destroys the given event.
   */
  virtual void destroyEvent(void* /*event*/, const DeviceIndex /*device_index*/)
      const noexcept {}

  /**
   * Increments the event's version and enqueues a job with this version
   * in the stream's work queue. When the stream processes that job
   * it notifies all streams waiting on / blocked by that version of the
   * event to continue and marks that version as recorded.
   * */
  virtual void record(
      void** /*event*/,
      const Stream& /*stream*/,
      const DeviceIndex /*device_index*/,
      const c10::EventFlag /*flag*/) const {
    TORCH_CHECK(false, "Backend doesn't support events.");
  }

  /**
   * Does nothing if the event has not been scheduled to be recorded.
   * If the event was previously enqueued to be recorded, a command
   * to wait for the version of the event that exists at the time of this call
   * is inserted in the stream's work queue.
   * When the stream reaches this command it will stop processing
   * additional commands until that version of the event is marked as recorded.
   */
  virtual void block(void* /*event*/, const Stream& /*stream*/) const {
    TORCH_CHECK(false, "Backend doesn't support events.");
  }

  /**
   * Returns true if (and only if)
   * (1) the event has never been scheduled to be recorded
   * (2) the current version is marked as recorded.
   * Returns false otherwise.
   */
  virtual bool queryEvent(void* /*event*/) const {
    TORCH_CHECK(false, "Backend doesn't support events.");
  }

  /**
   * Get the number of devices. WARNING: This is REQUIRED to not raise
   * an exception. If there is some sort of problem, e.g., driver error,
   * you should report that there are zero available devices.
   */
  virtual DeviceIndex deviceCount() const noexcept = 0;

  /**
   * Return true if all the work previously enqueued on the stream for
   * asynchronous execution has completed running on the device.
   */
  virtual bool queryStream(const Stream& /*stream*/) const {
    TORCH_CHECK(false, "Backend doesn't support querying streams.");
  }

  /**
   * Wait (by blocking the calling thread) until all the work previously
   * enqueued on the stream has completed running on the device.
   */
  virtual void synchronizeStream(const Stream& /*stream*/) const {
    TORCH_CHECK(false, "Backend doesn't support synchronizing streams.");
  }

  /**
   * Ensure the caching allocator (if any) is aware that the given DataPtr is
   * being used on the given stream, and that it should thus avoid recycling the
   * DataPtr until all work on that stream is done.
   */
  virtual void recordDataPtrOnStream(const c10::DataPtr&, const Stream&) const {
  }

  /**
   * Intended use of this class is to leak the DeviceGuardImpl at program end.
   * So you better not call the destructor, buster!
   */
  virtual ~DeviceGuardImplInterface() = default;
};

// A no-op device guard impl that doesn't do anything interesting. Useful
// for devices that don't actually have a concept of device index. Prominent
// examples are CPU and Meta.
template <DeviceType D>
struct NoOpDeviceGuardImpl final : public DeviceGuardImplInterface {
  NoOpDeviceGuardImpl() = default;
  DeviceType type() const override {
    return D;
  }
  Device exchangeDevice(Device) const override {
    return Device(D, -1); // no-op
  }
  Device getDevice() const override {
    return Device(D, -1);
  }
  void setDevice(Device) const override {
    // no-op
  }
  void uncheckedSetDevice(Device) const noexcept override {
    // no-op
  }
  Stream getStream(Device) const noexcept override {
    // no-op
    return Stream(Stream::DEFAULT, Device(D, -1));
  }
  // NB: These do NOT set the current device
  Stream exchangeStream(Stream) const noexcept override {
    // no-op
    return Stream(Stream::DEFAULT, Device(D, -1));
  }
  DeviceIndex deviceCount() const noexcept override {
    return 1;
  }

  // Event-related functions
  void record(
      void** /*event*/,
      const Stream& /*stream*/,
      const DeviceIndex /*device_index*/,
      const EventFlag /*flag*/) const override {
    TORCH_CHECK(false, D, " backend doesn't support events.");
  }
  void block(void* /*event*/, const Stream& /*stream*/) const override {
    TORCH_CHECK(false, D, " backend doesn't support events.")
  }
  bool queryEvent(void* /*event*/) const override {
    TORCH_CHECK(false, D, " backend doesn't support events.")
  }
  void destroyEvent(void* /*event*/, const DeviceIndex /*device_index*/)
      const noexcept override {}

  // Stream-related functions
  bool queryStream(const Stream& /*stream*/) const override {
    return true;
  }
  void synchronizeStream(const Stream& /*stream*/) const override {
    // Don't wait for anything.
  }
};

// The registry is NON-owning. Each stored pointer is std::atomic so
// that under all interleavings of registry calls the structure is
// race-free. This doesn't cost us anything on reads in X86. (An
// unsynchronized implementation probably is OK too, but I didn't want
// to prove that we never read from device_guard_impl_registry at the
// same time some registration is occurring. Shiver.)
//
// I'd like this registry to be valid even at program destruction time
// (in case someone uses a DeviceGuard in a destructor to do some cleanup
// in the CUDA API.) Since there are no direct accesses of the underlying
// owning objects which I can use to enforce initialization order (unlike
// in a Meyer singleton), it implies that you must *leak* objects when
// putting them in the registry. This is done by deleting the destructor
// on DeviceGuardImplInterface.
// NOLINTNEXTLINE(*c-arrays*)
extern C10_API std::atomic<const DeviceGuardImplInterface*>
    device_guard_impl_registry[static_cast<size_t>(
        DeviceType::COMPILE_TIME_MAX_DEVICE_TYPES)];

// I can't conveniently use c10/util/Registry.h for the following reason:
// c10/util/Registry.h gives me a slow way of Create'ing an object of some
// interface from the registry, but no way of quickly accessing an already
// created object. I'll be banging on getDeviceGuardImpl every time we do a
// DeviceGuard, so I really don't want to be doing an unordered_map lookup.
// Better if the registration mechanism directly drops its implementation
// into device_guard_impl_registry.

class C10_API DeviceGuardImplRegistrar {
 public:
  DeviceGuardImplRegistrar(DeviceType, const DeviceGuardImplInterface*);
};

#define C10_REGISTER_GUARD_IMPL(DevType, DeviceGuardImpl)              \
  static ::c10::impl::DeviceGuardImplRegistrar C10_ANONYMOUS_VARIABLE( \
      g_##DeviceType)(::c10::DeviceType::DevType, new DeviceGuardImpl());

inline const DeviceGuardImplInterface* getDeviceGuardImpl(DeviceType type) {
  // Two adjacent int16_t fields DeviceType and DeviceIndex have field access
  // miscompiled on NVCC. To workaround this issue, we apply a mask to the
  // DeviceType. First check if the DeviceType is 16-bit.
  // FB employees can see
  //   https://fb.workplace.com/groups/llvm.gcc/permalink/4053565044692080/
  // for more details
  static_assert(sizeof(DeviceType) == 1, "DeviceType is not 8-bit");
  auto p = device_guard_impl_registry[static_cast<size_t>(type) & 0xFF].load();

  // This seems to be the first place where you make use of a device
  // when you pass devices to factory functions. Give a nicer error
  // message in this case.
  TORCH_CHECK(p, "PyTorch is not linked with support for ", type, " devices");
  return p;
}

inline bool hasDeviceGuardImpl(DeviceType type) {
  return device_guard_impl_registry[static_cast<size_t>(type)].load();
}

} // namespace impl
} // namespace c10
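To make the registration flow concrete, a sketch of wiring up the no-op guard for an out-of-tree backend; PrivateUse1 is the DeviceType c10 reserves for such backends, and the namespace below is invented.

#include <c10/core/impl/DeviceGuardImplInterface.h>

namespace my_backend {

// A backend with no real notion of device index can reuse the no-op guard.
using Guard = c10::impl::NoOpDeviceGuardImpl<c10::DeviceType::PrivateUse1>;
C10_REGISTER_GUARD_IMPL(PrivateUse1, Guard);

} // namespace my_backend

// Generic code then reaches the implementation through the registry:
//   const auto* impl =
//       c10::impl::getDeviceGuardImpl(c10::DeviceType::PrivateUse1);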
venv/lib/python3.10/site-packages/torch/include/c10/core/impl/FakeGuardImpl.h
ADDED
@@ -0,0 +1,102 @@
#pragma once

#include <c10/core/impl/DeviceGuardImplInterface.h>

#include <array>

namespace c10::impl {

// FakeGuardImpl is hardcoded to have eight devices. Not for
// any good reason, just to simplify code.
constexpr DeviceIndex kFakeGuardImplMaxDevices = 8;

/**
 * A fake implementation of DeviceGuardImplInterface suitable for testing.
 * The current device is modeled as a mutable field in the guard implementation
 * class. See DeviceGuard_test.cpp for an example use.
 */
template <DeviceType T>
struct FakeGuardImpl final : public DeviceGuardImplInterface {
  static constexpr DeviceType static_type = T;
  // Runtime device type is not used
  FakeGuardImpl(DeviceType) {}
  FakeGuardImpl() = default;
  DeviceType type() const override {
    return T;
  }
  Device exchangeDevice(Device d) const override {
    AT_ASSERT(d.type() == type());
    AT_ASSERT(d.index() < kFakeGuardImplMaxDevices);
    Device old_device = getDevice();
    if (old_device.index() != d.index()) {
      current_device_ = d.index();
    }
    return old_device;
  }
  Device getDevice() const override {
    return Device(type(), current_device_);
  }
  void setDevice(Device d) const override {
    AT_ASSERT(d.type() == type());
    AT_ASSERT(d.index() >= 0);
    AT_ASSERT(d.index() < kFakeGuardImplMaxDevices);
    current_device_ = d.index();
  }
  void uncheckedSetDevice(Device d) const noexcept override {
    current_device_ = d.index();
  }
  Stream getStream(Device d) const noexcept override {
    return Stream(Stream::UNSAFE, d, current_streams_[d.index()]);
  }
  Stream exchangeStream(Stream s) const noexcept override {
    auto old_id = current_streams_[s.device_index()];
    current_streams_[s.device_index()] = s.id();
    return Stream(Stream::UNSAFE, s.device(), old_id);
  }
  DeviceIndex deviceCount() const noexcept override {
    return kFakeGuardImplMaxDevices;
  }

  // Event-related functions
  void record(
      void** event,
      const Stream& stream,
      const DeviceIndex device_index,
      const EventFlag flag) const override {}
  void block(void* event, const Stream& stream) const override {}
  bool queryEvent(void* event) const override {
    return true;
  }
  void destroyEvent(void* event, const DeviceIndex device_index)
      const noexcept override {}

  // Convenience methods for testing
  static DeviceIndex getDeviceIndex() {
    return current_device_;
  }
  static void setDeviceIndex(DeviceIndex i) {
    AT_ASSERT(i >= 0);
    AT_ASSERT(i < kFakeGuardImplMaxDevices);
    current_device_ = i;
  }
  static StreamId getCurrentStreamIdFor(DeviceIndex i) {
    return current_streams_.at(i);
  }
  static void resetStreams() {
    current_streams_.fill(0);
  }

 private:
  thread_local static DeviceIndex current_device_;
  thread_local static std::array<StreamId, kFakeGuardImplMaxDevices>
      current_streams_;
};

template <DeviceType T>
thread_local DeviceIndex FakeGuardImpl<T>::current_device_ = 0;

template <DeviceType T>
thread_local std::array<StreamId, kFakeGuardImplMaxDevices>
    FakeGuardImpl<T>::current_streams_ = {0, 0, 0, 0, 0, 0, 0, 0};

} // namespace c10::impl
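In the spirit of the DeviceGuard_test.cpp reference in the doc comment above, a small self-contained check against the fake guard (the function name is made up):

#include <c10/core/impl/FakeGuardImpl.h>

using TestImpl = c10::impl::FakeGuardImpl<c10::DeviceType::CUDA>;

// Exercises exchangeDevice against the thread-local fake state; no real
// CUDA device is touched.
void fake_guard_smoke_test() {
  TestImpl impl;
  TestImpl::setDeviceIndex(0);
  c10::Device prev =
      impl.exchangeDevice(c10::Device(c10::DeviceType::CUDA, 1));
  AT_ASSERT(prev.index() == 0);
  AT_ASSERT(TestImpl::getDeviceIndex() == 1);
}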
venv/lib/python3.10/site-packages/torch/include/c10/core/impl/GPUTrace.h
ADDED
@@ -0,0 +1,28 @@
#pragma once

#include <c10/core/impl/PyInterpreter.h>

namespace c10::impl {

struct C10_API GPUTrace {
  // On the x86 architecture the atomic operations are lock-less.
  static std::atomic<const PyInterpreter*> gpuTraceState;

  // When PyTorch migrates to C++20, this should be changed to an atomic flag.
  // Currently, the access to this variable is not synchronized, on the basis
  // that it will only be flipped once and by the first interpreter that
  // accesses it.
  static bool haveState;

  // This function will only register the first interpreter that tries to
  // invoke it. For all of the next ones it will be a no-op.
  static void set_trace(const PyInterpreter*);

  static const PyInterpreter* get_trace() {
    if (!haveState)
      return nullptr;
    return gpuTraceState.load(std::memory_order_acquire);
  }
};

} // namespace c10::impl
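The consumer side is a cheap poll; a hedged sketch (the dispatch through the interpreter is elided because the hook API lives in PyInterpreter.h, not here):

#include <c10/core/impl/GPUTrace.h>

// Hot paths check get_trace() and do nothing until a tracer registers.
void maybe_notify_tracer() {
  if (const c10::impl::PyInterpreter* interp =
          c10::impl::GPUTrace::get_trace()) {
    // A real caller would forward a trace event through `interp` here.
    (void)interp;
  }
}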
venv/lib/python3.10/site-packages/torch/include/c10/core/impl/HermeticPyObjectTLS.h
ADDED
@@ -0,0 +1,59 @@
#pragma once

#include <c10/macros/Export.h>
#include <atomic>

namespace c10::impl {

// This TLS controls whether or not we permanently associate PyObject
// with Tensor the first time it is allocated. When hermetic PyObject
// TLS is enabled (state is true), we DO NOT save PyObjects to Tensor,
// meaning you get a distinct PyObject whenever you execute the code in
// question.
struct C10_API HermeticPyObjectTLS {
  static void set_state(bool state);
  static bool get_state() {
    // Hypothetical fastpath if torchdeploy/multipy isn't used. Per
    // https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2020/p2055r0.pdf
    // this qualifies relaxed access because it is a single-location data
    // structure (only the boolean here).
    //
    // Forgetting about data races for a moment, is there a logical race?
    //
    // - Boolean only ever transitions from false to true. So the
    //   critical situation is when one interpreter is already running
    //   when a second interpreter switches haveState from false to true.
    //
    // - The first interpreter is indifferent whether or not it sees
    //   hasState true/false; obviously false works (this is what the
    //   interpreter was previously using; more directly, the interpreter
    //   calls into itself as the handler, so being hermetic is not
    //   required), and true simply means serviced python operator calls will
    //   be hermetic; in these cases it is expected to be functionally
    //   equivalent.
    //
    // - The second interpreter MUST see hasState true (as its requests will
    //   be forwarded to the first interpreter), but it is assumed that there
    //   is a synchronization between the interpreter initialization, and
    //   when we actually perform operations, so it is guaranteed to see
    //   hasState true.
    //
    // QED.
    //
    // This fastpath is currently disabled so that we can more easily test that
    // hermetic mode works correctly even on a stock build of PyTorch.
    if (false && !haveState_.load(std::memory_order_relaxed))
      return false;
    return get_tls_state();
  }
  // Call this from the multipy/torchdeploy top level
  static void init_state();

 private:
  // This is only flipped once from false to true during torchdeploy/multipy
  // initialization, and never again.
  static std::atomic<bool> haveState_;
  static bool get_tls_state();
};

} // namespace c10::impl
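A sketch of a scoped toggle built on the two declared entry points; the RAII wrapper itself is hypothetical:

#include <c10/core/impl/HermeticPyObjectTLS.h>

// Saves the flag, forces hermetic mode for the scope, then restores it.
struct HermeticScope {
  bool prev = c10::impl::HermeticPyObjectTLS::get_state();
  HermeticScope() {
    c10::impl::HermeticPyObjectTLS::set_state(true);
  }
  ~HermeticScope() {
    c10::impl::HermeticPyObjectTLS::set_state(prev);
  }
};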
venv/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineDeviceGuard.h
ADDED
@@ -0,0 +1,428 @@
#pragma once

// This file provides implementations of InlineDeviceGuard and
// InlineOptionalDeviceGuard.

#include <c10/core/Device.h>
#include <c10/core/DeviceType.h>
#include <c10/core/impl/DeviceGuardImplInterface.h>
#include <c10/core/impl/VirtualGuardImpl.h>
#include <c10/util/Exception.h>
#include <c10/util/Optional.h>
#include <type_traits>
#include <utility>

namespace c10::impl {

/**
 * A DeviceGuard is an RAII class that sets a device to some value
 * on construction, and resets the device to its original value on
 * destruction.
 *
 * InlineDeviceGuard is a helper class for implementing DeviceGuards.
 * It is templated over a DeviceGuardImpl (anything that implements
 * DeviceGuardImplInterface). There are two primary ways to instantiate
 * InlineDeviceGuard:
 *
 *  - With a concrete implementation of DeviceGuardImpl, e.g., CUDAGuardImpl.
 *    This is the best way to use InlineDeviceGuard, as all calls are
 *    devirtualized, giving you code as efficient as straight line
 *    calls to cudaGetDevice/cudaSetDevice.
 *
 *  - With VirtualGuardImpl, which does a virtual dispatch to a DeviceGuardImpl
 *    retrieved from a DeviceType registry. We have explicitly instantiated
 *    InlineDeviceGuard this way as c10::DeviceGuard.
 *
 * If you are in a hurry, you can use InlineDeviceGuard directly:
 *
 *    using CUDAGuard = impl::InlineDeviceGuard<CUDAGuardImpl>;
 *
 * However, you can provide a better user experience if you explicitly write a
 * wrapper class that itself contains the template instantiation:
 *
 *    class CUDAGuard {
 *    public:
 *      // ... the API ...
 *    private:
 *      impl::InlineDeviceGuard<CUDAGuardImpl> guard_;
 *    }
 *
 * The wrapper class provides a good place to write documentation, and helps
 * avoid weird template instantiation errors when a user incorrectly uses the
 * class.
 *
 * If you need to test this class, consider instantiating it with FakeGuardImpl.
 */
template <typename T>
class InlineDeviceGuard {
 public:
  // Note [Omitted default constructor from RAII]
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  // In principle, we could add a default constructor to
  // DeviceGuard which reads the current device and promises to
  // restore to that device on exit. However, most cases where you
  // would have written this, you probably meant to actually just
  // use OptionalDeviceGuard (since you don't actually need the
  // restore to happen if you don't ever actually set the device).
  // We remove the constructor here to encourage you to think about
  // what you actually want to happen.
  explicit InlineDeviceGuard() = delete;

  /// Set the current device to the passed Device.
  explicit InlineDeviceGuard(Device device)
      : impl_(device.type()),
        original_device_(
            device.index() == -1 ? impl_.getDevice()
                                 : impl_.exchangeDevice(device)),
        current_device_(device.index() == -1 ? original_device_ : device) {}

  /// Set the current device index to the passed DeviceIndex. (The
  /// device type is inferred from the template parameter T).
  template <
      typename U = T,
      typename =
          typename std::enable_if_t<!std::is_same_v<U, VirtualGuardImpl>>>
  explicit InlineDeviceGuard(DeviceIndex device_index)
      : InlineDeviceGuard(Device(U::static_type, device_index)) {}

  /// Construct an InlineDeviceGuard using VirtualGuardImpl with an explicit
  /// DeviceGuardImplInterface pointer.
  template <
      typename U = T,
      typename = typename std::enable_if_t<std::is_same_v<U, VirtualGuardImpl>>>
  explicit InlineDeviceGuard(
      Device device,
      const DeviceGuardImplInterface* impl)
      : impl_(
            VirtualGuardImpl(impl ? impl : getDeviceGuardImpl(device.type()))),
        original_device_(
            device.index() == -1 ? impl_.getDevice()
                                 : impl_.exchangeDevice(device)),
        current_device_(device.index() == -1 ? original_device_ : device) {}

  /// Copy is disallowed
  InlineDeviceGuard(const InlineDeviceGuard<T>&) = delete;
  InlineDeviceGuard<T>& operator=(const InlineDeviceGuard<T>&) = delete;

  /// Move is disallowed, as DeviceGuard does not have an uninitialized state,
  /// which is required for moves on types with nontrivial destructors.
  InlineDeviceGuard(InlineDeviceGuard<T>&& other) = delete;
  InlineDeviceGuard& operator=(InlineDeviceGuard<T>&& other) = delete;

  ~InlineDeviceGuard() {
    impl_.uncheckedSetDevice(original_device_);
  }

  /// Sets the device to the given one.
  template <
      typename U = T,
      typename std::enable_if_t<!std::is_same_v<U, VirtualGuardImpl>, int> = 0>
  void set_device(at::Device device) {
    AT_ASSERT(
        (U::static_type == DeviceType::HIP && device.is_cuda()) ||
        device.type() == U::static_type);
    auto index = device.index();
    if (index == -1)
      return;
    impl_.setDevice(device);
    current_device_ = device;
  }

  /// Resets the currently set device to its original device, and then sets the
  /// current device to the passed device. This is effectively equivalent to
  /// set_device when a guard supports only a single device type.
  template <typename U = T>
  typename std::enable_if_t<!std::is_same_v<U, VirtualGuardImpl>> reset_device(
      at::Device device) {
    set_device(device);
  }

  /// Resets the currently set device to its original device, and then sets the
  /// current device to the passed device (for a possibly different device
  /// type).
  ///
  /// This method is named reset_device to highlight the fact that previous
  /// device settings from this guard are NOT preserved, even if the device
  /// has a different device type. For example:
  ///
  ///   // CUDA device is 0
  ///   DeviceGuard g(Device(kCUDA, 1));
  ///   g.reset_device(Device(kHIP, 2));
  ///   // CUDA device is 0 (!!)
  ///
  /// NOTE: this implementation may skip some device setting if it can prove
  /// that it is unnecessary.
  ///
  /// Optional argument is for testing only.
  template <typename U = T>
  typename std::enable_if_t<std::is_same_v<U, VirtualGuardImpl>> reset_device(
      at::Device device,
      const impl::DeviceGuardImplInterface* impl = nullptr) {
    auto index = device.index();
    if (index == -1)
      return;
    if (device.type() == original_device_.type()) {
      AT_ASSERT(impl == nullptr || impl->type() == device.type());
      impl_.setDevice(device);
      current_device_ = device;
    } else {
      // Destruct and reconstruct the DeviceGuard in place
      impl_.setDevice(original_device_);
      impl_ = !impl ? VirtualGuardImpl(device.type()) : VirtualGuardImpl(impl);
      original_device_ = impl_.exchangeDevice(device);
      current_device_ = device;
    }
  }

  /// Sets the device index to the given one. The device type is inferred
  /// from the original device type.
  void set_index(DeviceIndex index) {
    reset_device(Device(original_device_.type(), index));
  }

  /// Returns the device that was set at the time of the most recent
  /// reset_device(), or otherwise the device at construction time.
  Device original_device() const {
    return original_device_;
  }

  /// Returns the most recent device that was set using this device guard,
  /// either from construction, or via set_device/reset_device/set_index.
  Device current_device() const {
    return current_device_;
  }

 protected:
  T impl_;

 private:
  Device original_device_;
  Device current_device_;
};

/**
 * An OptionalDeviceGuard is an RAII class that sets a device to some value on
 * initialization, and resets the device to its original value on destruction.
 *
 * InlineOptionalDeviceGuard is a helper class for implementing
 * OptionalDeviceGuards. See guidance in InlineDeviceGuard on how to
 * use this. See OptionalDeviceGuard for user-oriented usage notes.
 */
template <typename T>
class InlineOptionalDeviceGuard {
 public:
  // Note [Explicit initialization of optional fields]
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  // Explicit initialization of optional fields
  // required to workaround an nvcc bug; see
  // https://github.com/pytorch/pytorch/issues/12117

  /// Creates an uninitialized OptionalDeviceGuard.
  explicit InlineOptionalDeviceGuard()
      : guard_() // See Note [Explicit initialization of optional fields]
  {}

  /// Set the current device to the passed Device, if it is not nullopt.
  explicit InlineOptionalDeviceGuard(optional<Device> device_opt)
      : guard_() { // See Note [Explicit initialization of optional fields]
    if (device_opt.has_value()) {
      guard_.emplace(device_opt.value());
    }
  }

  /// Set the current device to the passed DeviceIndex, if it is not nullopt.
  template <
      typename U = T,
      typename =
          typename std::enable_if_t<!std::is_same_v<U, VirtualGuardImpl>>>
  explicit InlineOptionalDeviceGuard(optional<DeviceIndex> device_index_opt)
      : guard_() { // See Note [Explicit initialization of optional fields]
    if (device_index_opt.has_value()) {
      guard_.emplace(device_index_opt.value());
    }
  }

  /// All constructors of DeviceGuard are valid for OptionalDeviceGuard
  /// and result in initialized OptionalDeviceGuard.
  template <typename... Args>
  explicit InlineOptionalDeviceGuard(Args&&... args)
      : guard_(std::in_place, std::forward<Args>(args)...) {}

  // TODO: Consider reading Tensor and TensorList constructors here, when
  // Tensor moves to c10. (These are only valid on OptionalDeviceGuard,
  // because a Tensor may be undefined, in which case we need an uninitialized
  // tensor guard.)

  // Note [Move construction for RAII guards is tricky]
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  // In principle, move construction is useful for terminating
  // the lifetime of a `OptionalDeviceGuard` early; for example:
  //
  //     // current device is d0
  //     OptionalDeviceGuard g1(d1);
  //     // current device is d1
  //     {
  //       OptionalDeviceGuard g2(std::move(g1));
  //     }
  //     // current device is d0!!
  //
  // However, it's difficult to implement the move constructor
  // in a way that works in all situations. For example, consider
  // the following example:
  //
  //     OptionalDeviceGuard g1(d1);
  //     {
  //       OptionalDeviceGuard g2(d2);
  //       {
  //         OptionalDeviceGuard g3(std::move(g1)); // !!!
  //       }
  //     }
  //
  // What should the current device be while g3 is in scope... and what
  // should it be after it goes out of scope? What about g2?
  // There don't seem to be satisfactory answers for these questions.
  //
  // It's in principle possible to raise an error when this occurs
  // by doing some extra thread-local bookkeeping. But why bother?
  // Just don't provide the constructor.
  InlineOptionalDeviceGuard(InlineOptionalDeviceGuard<T>&& other) = delete;

  // Note [Move assignment for RAII guards is tricky]
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  // Move assignment is deleted, because you need to know which guard was
  // defined "first", as that guard's original_device_ wins--with the current
  // representation, we have no way of telling which is the case. (Move
  // construction does not have this problem, as one guard is always
  // uninitialized.)
  //
  // We can make this clear by way of a pair of examples:
  //
  // Example 1:
  //
  //  // initial device is n0
  //  {
  //    CUDAGuard g1(n1);
  //    {
  //      CUDAGuard g2(n2);
  //      // current device should be n2
  //      g1 = std::move(g2);
  //      // current device should still be n2
  //    }
  //    // current device should still be n2
  //  }
  //  // current device should be n0
  //
  //  Example 2 (flip the order of the two guards):
  //
  //  // initial device is n0
  //  {
  //    CUDAGuard g2(n2);
  //    {
  //      CUDAGuard g1(n1);
  //      // current device should be n1
  //      g1 = std::move(g2);
  //      // current device should be n2
  //    }
  //    // current device should be n0 (since g2 has been vacated)
  //  }
  //
  // In both examples, we need g1 to restore to n0 after move assignment.
  // However, in example 1, this is determined by the restore value of g1
  // (prior to the move). In example 2, however, it is determined by the
  // restore value of g2(!!). We don't know which one should win, without
  // having a way of telling which guard was allocated first.
  //
  // We could solve this with an extra thread-local variable. But no one is
  // actually using move-assignment. So just get rid of it.
  InlineOptionalDeviceGuard& operator=(InlineOptionalDeviceGuard&& other) =
      delete;

  /// Sets the device to the given one. Initializes OptionalDeviceGuard if it
  /// is not already initialized.
  template <
      typename U = T,
      typename =
          typename std::enable_if_t<!std::is_same_v<U, VirtualGuardImpl>>>
  void set_device(at::Device device) {
    if (!guard_.has_value()) {
      guard_.emplace(device);
    } else {
      guard_->set_device(device);
    }
  }

  /// Resets the currently set device to its original device, and then sets the
  /// current device to the passed device (for a possibly different device
  /// type). Initializes OptionalDeviceGuard if it is not already initialized.
  ///
  /// See notes on why this is called reset_device on InlineDeviceGuard.
  ///
  /// Optional argument is for testing only.
  template <
      typename U = T,
      typename = typename std::enable_if_t<std::is_same_v<U, VirtualGuardImpl>>>
  void reset_device(
      at::Device device,
      const DeviceGuardImplInterface* impl = nullptr) {
    if (!guard_.has_value()) {
      guard_.emplace(device, impl);
    } else {
      guard_->reset_device(device, impl);
    }
  }

  /// Resets the currently set device to its original device, and then sets the
  /// current device to the passed device. Initializes the guard if it is
  /// not already initialized. This is effectively equivalent to set_device
  /// when a guard supports only a single device type.
  template <
      typename U = T,
      typename =
          typename std::enable_if_t<!std::is_same_v<U, VirtualGuardImpl>>>
  void reset_device(at::Device device) {
    if (!guard_.has_value()) {
      guard_.emplace(device);
    } else {
      guard_->reset_device(device);
    }
  }

  /// Sets the device index to the given one. The device type is statically
  /// known.
  template <
      typename U = T,
      typename =
          typename std::enable_if_t<!std::is_same_v<U, VirtualGuardImpl>>>
  void set_index(DeviceIndex index) {
    if (!guard_.has_value()) {
      guard_.emplace(index);
    } else {
      guard_->set_index(index);
    }
  }

  /// Returns the device that was set immediately prior to initialization of
  /// the guard, or nullopt if the guard is uninitialized.
  optional<Device> original_device() const {
    return guard_.has_value() ? make_optional(guard_->original_device())
                              : nullopt;
  }

  /// Returns the most recent device that was set using this device guard,
  /// either from construction, or via set_device, if the guard is initialized,
  /// or nullopt if the guard is uninitialized.
  optional<Device> current_device() const {
    return guard_.has_value() ? make_optional(guard_->current_device())
                              : nullopt;
  }

  /// Restore the original device, resetting this guard to uninitialized state.
  void reset() {
    guard_.reset();
  }

 private:
  optional<InlineDeviceGuard<T>> guard_;
};

} // namespace c10::impl
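Following the header's own testing advice, a sketch instantiating InlineDeviceGuard with FakeGuardImpl so the RAII behavior can be observed without hardware (the demo function is invented):

#include <c10/core/impl/FakeGuardImpl.h>
#include <c10/core/impl/InlineDeviceGuard.h>

using FakeCUDAGuard = c10::impl::InlineDeviceGuard<
    c10::impl::FakeGuardImpl<c10::DeviceType::CUDA>>;

void guard_demo() {
  c10::impl::FakeGuardImpl<c10::DeviceType::CUDA>::setDeviceIndex(0);
  {
    FakeCUDAGuard g(c10::Device(c10::DeviceType::CUDA, 3));
    // Within this scope the fake "current device" is 3.
  }
  // The destructor restored device index 0 via uncheckedSetDevice.
}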
venv/lib/python3.10/site-packages/torch/include/c10/core/impl/InlineEvent.h
ADDED
@@ -0,0 +1,113 @@
#pragma once

#include <c10/core/DeviceType.h>
#include <c10/core/Stream.h>
#include <c10/core/impl/DeviceGuardImplInterface.h>
#include <c10/util/Exception.h>

namespace c10::impl {

template <typename T>
struct InlineEvent final {
  InlineEvent() = delete;
  InlineEvent(
      const DeviceType _device_type,
      const EventFlag _flag = EventFlag::PYTORCH_DEFAULT)
      : backend_{_device_type}, device_type_{_device_type}, flag_{_flag} {}

  // Copy constructor and copy assignment operator (deleted)
  InlineEvent(const InlineEvent&) = delete;
  InlineEvent& operator=(const InlineEvent&) = delete;

  // Move constructor and move assignment operator
  InlineEvent(InlineEvent&& other) noexcept
      : event_(other.event_),
        backend_(std::move(other.backend_)),
        device_type_(other.device_type_),
        device_index_(other.device_index_),
        flag_(other.flag_),
        was_marked_for_recording_(other.was_marked_for_recording_) {
    other.event_ = nullptr;
  }
  InlineEvent& operator=(InlineEvent&& other) noexcept {
    swap(other);
    return *this;
  }

  void swap(InlineEvent& other) noexcept {
    std::swap(event_, other.event_);
    std::swap(backend_, other.backend_);
    std::swap(device_type_, other.device_type_);
    std::swap(device_index_, other.device_index_);
    std::swap(flag_, other.flag_);
    std::swap(was_marked_for_recording_, other.was_marked_for_recording_);
  }

  ~InlineEvent() noexcept {
    if (event_)
      backend_.destroyEvent(event_, device_index_);
  }

  DeviceType device_type() const noexcept {
    return device_type_;
  }
  DeviceIndex device_index() const noexcept {
    return device_index_;
  }
  EventFlag flag() const noexcept {
    return flag_;
  }
  bool was_marked_for_recording() const noexcept {
    return was_marked_for_recording_;
  }

  void recordOnce(const Stream& stream) {
    if (!was_marked_for_recording_)
      record(stream);
  }

  void record(const Stream& stream) {
    TORCH_CHECK(
        stream.device_type() == device_type_,
        "Event device type ",
        DeviceTypeName(device_type_),
        " does not match recording stream's device type ",
        DeviceTypeName(stream.device_type()),
        ".");

    backend_.record(&event_, stream, device_index_, flag_);
    was_marked_for_recording_ = true;
    device_index_ = stream.device_index();
  }

  void block(const Stream& stream) const {
    if (!was_marked_for_recording_)
      return;

    TORCH_CHECK(
        stream.device_type() == device_type_,
        "Event device type ",
        DeviceTypeName(device_type_),
        " does not match blocking stream's device type ",
        DeviceTypeName(stream.device_type()),
        ".");

    backend_.block(event_, stream);
  }

  bool query() const {
    if (!was_marked_for_recording_)
      return true;
    return backend_.queryEvent(event_);
  }

 private:
  void* event_ = nullptr;
  T backend_;
  DeviceType device_type_;
  DeviceIndex device_index_ = -1;
  EventFlag flag_ = EventFlag::PYTORCH_DEFAULT;
  bool was_marked_for_recording_ = false;
};

} // namespace c10::impl
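As a rough usage sketch (not part of the diff; the stream handles are assumed to come from a real backend), the record/block/query trio above maps onto the usual cross-stream synchronization pattern:

#include <c10/core/impl/InlineEvent.h>
#include <c10/core/impl/VirtualGuardImpl.h>

void sync_streams(const c10::Stream& producer, const c10::Stream& consumer) {
  c10::impl::InlineEvent<c10::impl::VirtualGuardImpl> event(
      producer.device_type());
  event.record(producer);    // mark a point in the producer's work queue
  event.block(consumer);     // consumer waits until that point completes
  bool done = event.query(); // non-blocking completion poll
  (void)done;
}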
venv/lib/python3.10/site-packages/torch/include/c10/core/impl/LocalDispatchKeySet.h
ADDED
@@ -0,0 +1,162 @@
#pragma once

#include <c10/core/DispatchKeySet.h>
#include <c10/macros/Export.h>

// TLS management for DispatchKeySet (the "local" DispatchKeySet(s))
//
// This manages two thread-local DispatchKeySets:
//
//  - The included type set, which adds a tensor type for consideration
//    in dispatch. (For example, you might add Profiling to
//    the included type set to turn on profiling on all tensor operations.)
//
//  - The excluded type set, which disqualifies a tensor type from dispatch.
//    (For example, after redispatching on variable, we disqualify
//    Autograd so we don't attempt to handle variable again.)
//    (Exclusion wins over inclusion.)
//
// NB: Originally, I implemented the excluded type set as storing the
// inverted set, but TLS is defined to be zero-initialized, so this doesn't
// actually work (if it's inverted, you want the set to be -1 initialized).

namespace c10::impl {

// POD version of LocalDispatchKeySet. Declared here just so that
// we can put it in the guards.
// This struct encapsulates special handling for TLS initialization
// in set_included()/included() API so that they reflect the truth.
// If you want to create PODLocalDispatchKeySet with non-zero state,
// use set_included() instead of default constructor.
struct C10_API PODLocalDispatchKeySet {
  uint64_t included_;
  uint64_t excluded_;

  // See Note [TLS Initialization]
  DispatchKeySet included() const {
    return DispatchKeySet(DispatchKeySet::RAW, included_) ^
        c10::default_included_set;
  }
  DispatchKeySet excluded() const {
    return DispatchKeySet(DispatchKeySet::RAW, excluded_) ^
        c10::default_excluded_set;
  }

  void set_included(DispatchKeySet x) {
    included_ = (x ^ c10::default_included_set).raw_repr();
  }
  void set_excluded(DispatchKeySet x) {
    excluded_ = (x ^ c10::default_excluded_set).raw_repr();
  }
};
static_assert(
    std::is_trivial_v<PODLocalDispatchKeySet>,
    "PODLocalDispatchKeySet must be a POD type.");

struct C10_API LocalDispatchKeySet {
  /* implicit */ LocalDispatchKeySet(PODLocalDispatchKeySet x)
      : included_(x.included()), excluded_(x.excluded()) {}
  DispatchKeySet included_;
  DispatchKeySet excluded_;
};

// thread_local variables cannot be C10_API on Windows.
// Inlining this seems to break AutoDispatchBelowAutograd on Android.
#if defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE)
C10_API LocalDispatchKeySet tls_local_dispatch_key_set();
#else // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE)
extern C10_API thread_local PODLocalDispatchKeySet raw_local_dispatch_key_set;

inline C10_API LocalDispatchKeySet tls_local_dispatch_key_set() {
  // Don't let people fiddle with the thread_local directly just
  // because they include this header.
  return raw_local_dispatch_key_set;
}
#endif // defined(_MSC_VER) || defined(C10_ANDROID) || defined(C10_IPHONE)

// Internal, use ThreadLocalStateGuard
C10_API void _force_tls_local_dispatch_key_set(LocalDispatchKeySet key_set);

// RAII API for manipulating the thread-local dispatch state.

class C10_API IncludeDispatchKeyGuard {
 public:
  IncludeDispatchKeyGuard(DispatchKeySet);
  IncludeDispatchKeyGuard(DispatchKey k)
      : IncludeDispatchKeyGuard(DispatchKeySet(k)) {}
  IncludeDispatchKeyGuard(const IncludeDispatchKeyGuard&) = delete;
  IncludeDispatchKeyGuard operator=(const IncludeDispatchKeyGuard&) = delete;
  IncludeDispatchKeyGuard(IncludeDispatchKeyGuard&&) = delete;
  IncludeDispatchKeyGuard operator=(IncludeDispatchKeyGuard&&) = delete;
  ~IncludeDispatchKeyGuard();

 private:
  // A little micro-optimization to save us from a tls_get_addr call
  // on destruction
  PODLocalDispatchKeySet* tls_;
  DispatchKeySet include_;
};

class C10_API ExcludeDispatchKeyGuard {
 public:
  ExcludeDispatchKeyGuard(DispatchKeySet);
  ExcludeDispatchKeyGuard(DispatchKey k)
      : ExcludeDispatchKeyGuard(DispatchKeySet(k)) {}
  ExcludeDispatchKeyGuard(const ExcludeDispatchKeyGuard&) = delete;
  ExcludeDispatchKeyGuard operator=(const ExcludeDispatchKeyGuard&) = delete;
  ExcludeDispatchKeyGuard(ExcludeDispatchKeyGuard&&) = delete;
  ExcludeDispatchKeyGuard operator=(ExcludeDispatchKeyGuard&&) = delete;
  ~ExcludeDispatchKeyGuard();

 private:
  // A little micro-optimization to save us from a tls_get_addr call
  // on destruction
  PODLocalDispatchKeySet* tls_;
  DispatchKeySet exclude_;
};

struct C10_API ForceDispatchKeyGuard {
 public:
  ForceDispatchKeyGuard(c10::impl::LocalDispatchKeySet key_set)
      : saved_keyset_(c10::impl::tls_local_dispatch_key_set()) {
    c10::impl::_force_tls_local_dispatch_key_set(key_set);
  }
  ForceDispatchKeyGuard(
      c10::DispatchKeySet include,
      c10::DispatchKeySet exclude)
      : saved_keyset_(c10::impl::tls_local_dispatch_key_set()) {
    auto updated_set = saved_keyset_;
    updated_set.included_ = include;
    updated_set.excluded_ = exclude;
    c10::impl::_force_tls_local_dispatch_key_set(updated_set);
  }
  ~ForceDispatchKeyGuard() {
    c10::impl::_force_tls_local_dispatch_key_set(saved_keyset_);
  }

 private:
  c10::impl::LocalDispatchKeySet saved_keyset_;
};

// Non-RAII API for manipulating the thread-local dispatch state.
// Please prefer the RAII API. The non-RAII API may be useful when
// the included/excluded state of a given DispatchKey must span
// many calls from Python into C++, so you cannot conveniently
// use an RAII guard.
//
// Example use case: a Python context manager that includes a certain
// DispatchKey, to ensure ops running under the context manager dispatch
// through that DispatchKey's registered overrides.
//
// The non-RAII API is less efficient than the RAII guards because both the
// getter and setter will do a tls_get_addr lookup (the RAII struct only
// needs one!)

C10_API bool tls_is_dispatch_key_excluded(DispatchKey x);
C10_API void tls_set_dispatch_key_excluded(DispatchKey x, bool desired_state);
C10_API bool tls_is_dispatch_key_included(DispatchKey x);
C10_API void tls_set_dispatch_key_included(DispatchKey x, bool desired_state);
C10_API bool tls_is_dispatch_keyset_excluded(DispatchKeySet ks);
C10_API bool tls_is_dispatch_keyset_included(DispatchKeySet ks);

} // namespace c10::impl
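A typical use of the RAII guards declared above, sketched under the assumption that the caller is about to redispatch (the pattern the excluded set exists for):

#include <c10/core/impl/LocalDispatchKeySet.h>

void redispatch_below_autograd() {
  // While the guard is alive, Autograd sits in the thread-local excluded
  // set, so subsequent dispatches skip the Autograd kernels.
  c10::impl::ExcludeDispatchKeyGuard no_autograd(c10::DispatchKey::Autograd);
  // ... call back into the dispatcher here ...
} // destructor restores the previous TLS state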
venv/lib/python3.10/site-packages/torch/include/c10/core/impl/PyInterpreter.h
ADDED
@@ -0,0 +1,239 @@
#pragma once

#include <c10/core/Device.h>
#include <c10/core/DispatchKeySet.h>
#include <c10/core/Layout.h>
#include <c10/core/MemoryFormat.h>
#include <c10/core/SymIntArrayRef.h>
#include <c10/macros/Export.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/intrusive_ptr.h>
#include <c10/util/python_stub.h>
#include <string>
#include <vector>

// Forward declarations

namespace c10 {
struct IValue;
class OperatorHandle;
struct TensorImpl;
} // namespace c10

namespace torch::jit {
using Stack = std::vector<c10::IValue>;
}

// Actual implementation

namespace c10::impl {

struct C10_API PyInterpreter;

// Note [Python interpreter tag]
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Traditionally, PyTorch is layered such that our Python library
// (libtorch_python) references our pure C++ library (libtorch) as the
// natural order of things. However, sometimes this natural order is
// subverted: C++ objects refer to Python objects (for example, we
// store a PyObject* pointer on TensorImpl so that converting from a
// C++ Tensor to a Python Tensor is just a memory dereference).
//
// These unusual orderings must be treated with care. To start, you need to
// virtualize the destructor so that the PyObject can be decref'ed on
// destruction (because the C++ object itself doesn't know anything about
// Python--remember, layering!). This process itself is fraught, since
// acquiring the GIL could lead to deadlocks if someone is blocking on you
// while holding the GIL. Furthermore, if the C++ objects outlive the
// interpreter (which can happen if you stash them in a static global
// variable defined in libtorch), you may attempt to decref the object when
// the Python interpreter has already been shutdown.
//
// BUT WAIT, IT GETS WORSE. With torchdeploy, there may be multiple Python
// interpreters in a single process. If a C++ object is accessible from
// multiple interpreters, we must take care not to accidentally pass a
// PyObject from one interpreter to another interpreter.
//
// To prevent these mixups, we introduce a PyInterpreter "tag" (object with
// a vtable), which specifies a specific Python interpreter.
//
//  - Any given object can be associated with AT MOST one Python interpreter.
//    We represent the interpreter tag as a memory address to an instance of
//    a virtual class that is allocated once per interpreter (this is so
//    that we can request the interpreter to perform operations for us, if
//    necessary).
//
//  - It can be recorded with a PyObject (PyInterpreterObject) so that
//    we know what interpreter the object is associated with, and we can
//    raise an error if you try to use the PyObject from the wrong
//    interpreter context.
//
//  - It contains a vtable that can be used to perform various Python
//    operations from ordinary C++ code that ordinarily wouldn't be
//    accessible from libtorch.
//
// A simple use case is when a C++ object must be associated with a PyObject.
// However, for TensorImpl, we lazily allocate a PyObject the first time the
// object passes into Python. The invariants for this situation are more
// subtle:
//
//  - A given TensorImpl's interpreter tag can only go from uninitialized to
//    tagged; once tagged, this is a quiescent state (once tagged to an
//    interpreter, ALWAYS tagged to that interpreter).
//
//  - A thread may mutate the PyObject field of a TensorImpl if and only if
//    it holds the GIL for the interpreter tagged on the TensorImpl. (If the
//    TensorImpl is not tagged, it must first atomically claim its tag
//    before it can validly write.)
//
// WARNING: This class has to be written very carefully, because it may be
// possible for a Tensor to have a reference to an interpreter corresponding
// to a shared library that has ALREADY BEEN UNLOADED. This makes blindly
// calling virtual methods very dangerous, because the vtable may be garbage
// at that point (on a good day, you might get "pure virtual method
// called").
//
// The idea to solve this problem is we always leak PyInterpreters (so they
// always stay live even after dlclose), and make sure we can disarm their
// virtual methods by indirecting through a separate PyInterpreterVTable
// object. This can be replaced with a no-op vtable from libc10.so, which
// is guaranteed to stick around until the bitter end.
//
// NB: The downside with representing PyInterpreter tags as full objects is
// that it takes an extra word on TensorImpl. If tags were instead just
// integer indices, on 64-bit architectures we could pack the tag and
// PyObject together into a single atomic word. On 32-bit architectures we
// could simply say that only one Python interpreter is supported (erroring
// if a nontrivial interpreter tag is attempted to be set).
//
// The difficulty with this scheme is we need to maintain an out-of-line
// table to get at the PyInterpreters so that we can do virtual method calls
// on them, and registration/deregistration to this table must be done in a
// thread-safe manner. This can be easily done if the number of possible
// PyInterpreters is small enough (e.g., 8-bit integer) by simply
// preallocating an array of sufficient size to hold all possible
// interpreters. Surely 128 threads is more than enough for anyone!
//
// I didn't decide to do this technique at the moment, because the extra
// word added by the PyInterpreter tag takes us to 24 words, which means
// that we still fit inside three eight-word cache lines. If you need to
// penny pinch another word consider doing this!

struct C10_API PyInterpreterVTable {
  virtual ~PyInterpreterVTable() = default;

  // Report the name of this interpreter
  virtual std::string name() const = 0;

  // Run Py_DECREF on a PyObject. We DO NOT assume the GIL is held on call.
  // See NOTE [PyInterpreter::decref takes a `has_pyobj_slot` arg]
  virtual void decref(PyObject* pyobj, bool has_pyobj_slot) const = 0;

  // Perform a detach by deferring to the __torch_dispatch__ implementation
  // of detach, which will also arrange for the PyObject to get copied in
  // this situation
  virtual c10::intrusive_ptr<TensorImpl> detach(
      const TensorImpl* self) const = 0;

  // Invoke the Python boxed fallback dispatch to go back into Python
  virtual void dispatch(const c10::OperatorHandle& op, torch::jit::Stack* stack)
      const = 0;

  virtual void reportErrorCallback(PyObject* callback, DispatchKey key)
      const = 0;

  // This is only invoked in the multipy/torchdeploy situation from
  // pythonOpRegistrationTrampoline; this lets us get to the Python
  // interpreter to actually find the appropriate Python op registration
  // entry to call.
  virtual void python_op_registration_trampoline(
      const c10::OperatorHandle& op,
      c10::DispatchKey,
      torch::jit::Stack* stack) const = 0;

  virtual void throw_abstract_impl_not_imported_error(
      std::string opname,
      const char* pymodule,
      const char* context) const = 0;

  // Invoke the Python dispatcher to handle this call
  virtual void python_dispatcher(
      const c10::OperatorHandle& op,
      c10::DispatchKeySet,
      torch::jit::Stack* stack) const = 0;

  virtual bool is_contiguous(const TensorImpl* self, at::MemoryFormat)
      const = 0;
  virtual bool is_strides_like(const TensorImpl* self, at::MemoryFormat)
      const = 0;
  virtual bool is_non_overlapping_and_dense(const TensorImpl* self) const = 0;
  virtual c10::Device device(const TensorImpl* self) const = 0;
  virtual int64_t dim(const TensorImpl* self) const = 0;
  virtual c10::IntArrayRef strides(const TensorImpl* self) const = 0;
  virtual c10::IntArrayRef sizes(const TensorImpl* self) const = 0;
  virtual c10::SymIntArrayRef sym_sizes(const TensorImpl* self) const = 0;
  virtual c10::Layout layout(const TensorImpl* self) const = 0;
  virtual int64_t numel(const TensorImpl* self) const = 0;
  virtual c10::SymInt sym_numel(const TensorImpl* self) const = 0;
  virtual c10::SymIntArrayRef sym_strides(const TensorImpl* self) const = 0;
  virtual c10::SymInt sym_storage_offset(const TensorImpl* self) const = 0;

  virtual void trace_gpu_event_creation(uintptr_t event) const = 0;
  virtual void trace_gpu_event_deletion(uintptr_t event) const = 0;
  virtual void trace_gpu_event_record(uintptr_t event, uintptr_t stream)
      const = 0;
  virtual void trace_gpu_event_wait(uintptr_t event, uintptr_t stream)
      const = 0;
  virtual void trace_gpu_memory_allocation(uintptr_t ptr) const = 0;
  virtual void trace_gpu_memory_deallocation(uintptr_t ptr) const = 0;
  virtual void trace_gpu_stream_creation(uintptr_t stream) const = 0;
  virtual void trace_gpu_device_synchronization() const = 0;
  virtual void trace_gpu_stream_synchronization(uintptr_t stream) const = 0;
  virtual void trace_gpu_event_synchronization(uintptr_t event) const = 0;

  virtual void reset_backward_hooks(const TensorImpl* self) const = 0;
};

struct C10_API PyInterpreter {
  const PyInterpreterVTable* vtable_;

  PyInterpreter(const PyInterpreterVTable* vtable) : vtable_(vtable) {}

  const PyInterpreterVTable& operator*() const noexcept {
    return *vtable_;
  }
  const PyInterpreterVTable* operator->() const noexcept {
    return vtable_;
  }

  // Disarm this PyInterpreter, making all of its methods noops.
  // The vtable pointer is not an atomic at the moment, which means
  // a disarm() invocation that is concurrent with active destructors
  // is not thread safe and will trigger TSAN. My hope is that this
  // situation doesn't ever actually happen; tensor destruction should
  // quiesce when a dlclose happens, and any long-lived tensors whose
  // destructors would be disarmed here only begin the destruction process
  // on process shutdown (long after the dlclose has occurred).
  void disarm() noexcept;
};

// PyInterpreterStatus describes what the state of its interpreter tag
// is, relative to the thread currently holding the GIL.
enum class PyInterpreterStatus {
  // We just allocated the Tensor, it hasn't escaped to other threads,
  // we know that it definitely hasn't been tagged to be associated
  // with an interpreter.
  DEFINITELY_UNINITIALIZED,
  // We queried the interpreter field and it looked uninitialized. But
  // another thread may have raced with us to tag it with some other
  // interpreter id. So we will have to do a CEX to make sure we can
  // actually nab it.
  MAYBE_UNINITIALIZED,
  // We queried the interpreter field and it was tagged to belong to us.
  // This means we have sole write access (as we hold the GIL for this
  // interpreter).
  TAGGED_BY_US,
  // Someone else tagged this. We can't use this TensorImpl from Python.
  TAGGED_BY_OTHER,
};

} // namespace c10::impl
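The MAYBE_UNINITIALIZED comment above alludes to a compare-exchange on the tag field. A hypothetical, simplified illustration (TaggedSlot and try_claim are not from the header) of how such a claim resolves races:

#include <atomic>

#include <c10/core/impl/PyInterpreter.h>

struct TaggedSlot {
  std::atomic<const c10::impl::PyInterpreter*> interpreter_{nullptr};

  c10::impl::PyInterpreterStatus try_claim(
      const c10::impl::PyInterpreter* self) {
    const c10::impl::PyInterpreter* expected = nullptr;
    if (interpreter_.compare_exchange_strong(expected, self)) {
      return c10::impl::PyInterpreterStatus::TAGGED_BY_US; // we won the race
    }
    // Lost the race: either we had already tagged it earlier, or another
    // interpreter owns it now.
    return expected == self
        ? c10::impl::PyInterpreterStatus::TAGGED_BY_US
        : c10::impl::PyInterpreterStatus::TAGGED_BY_OTHER;
  }
};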
venv/lib/python3.10/site-packages/torch/include/c10/core/impl/PythonDispatcherTLS.h
ADDED
@@ -0,0 +1,24 @@
#pragma once

#include <c10/core/impl/PyInterpreter.h>
#include <c10/macros/Export.h>

namespace c10::impl {

struct C10_API PythonDispatcherTLS {
  static void set_state(PyInterpreter* state);
  static PyInterpreter* get_state();
  static void reset_state();
};

struct C10_API DisablePythonDispatcher {
  DisablePythonDispatcher() : old_(PythonDispatcherTLS::get_state()) {
    PythonDispatcherTLS::set_state({});
  }
  ~DisablePythonDispatcher() {
    PythonDispatcherTLS::set_state(old_);
  }
  PyInterpreter* old_;
};

} // namespace c10::impl
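A short sketch of the RAII helper above (the surrounding function is hypothetical):

#include <c10/core/impl/PythonDispatcherTLS.h>

void cpp_fast_path() {
  // Clears the Python dispatcher for this scope; the previous interpreter
  // pointer is restored on exit, including on exception.
  c10::impl::DisablePythonDispatcher no_py_dispatcher;
  // ... dispatch ops without bouncing through the Python dispatcher ...
}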
venv/lib/python3.10/site-packages/torch/include/c10/core/impl/SizesAndStrides.h
ADDED
@@ -0,0 +1,315 @@
#pragma once

#include <algorithm>
#include <cstdint>

#include <c10/macros/Macros.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/SmallVector.h>

#define C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE 5

namespace c10::impl {

// Packed container for TensorImpl sizes and strides.
// This design improves on the previous approach of using a pair of
// c10::SmallVector<int64_t, 5> by specializing for the operations we
// actually use and enforcing that the number of sizes is the same as
// the number of strides. The memory layout is as follows:
//
// 1 size_t for the size
// 5 eightbytes of inline sizes and 5 eightbytes of inline strides, OR
// pointer to out-of-line array
class C10_API SizesAndStrides {
 public:
  // TODO: different iterator types for sizes & strides to prevent
  // mixing the two accidentally.
  using sizes_iterator = int64_t*;
  using sizes_const_iterator = const int64_t*;
  using strides_iterator = int64_t*;
  using strides_const_iterator = const int64_t*;

  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  SizesAndStrides() {
    size_at_unchecked(0) = 0;
    stride_at_unchecked(0) = 1;
  }

  ~SizesAndStrides() {
    if (C10_UNLIKELY(!isInline())) {
      // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
      free(outOfLineStorage_);
    }
  }

  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  SizesAndStrides(const SizesAndStrides& rhs) : size_(rhs.size_) {
    if (C10_LIKELY(rhs.isInline())) {
      copyDataInline(rhs);
    } else {
      allocateOutOfLineStorage(size_);
      copyDataOutline(rhs);
    }
  }

  SizesAndStrides& operator=(const SizesAndStrides& rhs) {
    if (this == &rhs) {
      return *this;
    }
    if (C10_LIKELY(rhs.isInline())) {
      if (C10_UNLIKELY(!isInline())) {
        // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
        free(outOfLineStorage_);
      }
      copyDataInline(rhs);
    } else {
      if (isInline()) {
        allocateOutOfLineStorage(rhs.size_);
      } else {
        resizeOutOfLineStorage(rhs.size_);
      }
      copyDataOutline(rhs);
    }
    size_ = rhs.size_;
    return *this;
  }

  // Move from rhs. rhs.size() == 0 afterwards.
  SizesAndStrides(SizesAndStrides&& rhs) noexcept : size_(rhs.size_) {
    if (C10_LIKELY(isInline())) {
      memcpy(inlineStorage_, rhs.inlineStorage_, sizeof(inlineStorage_));
    } else {
      outOfLineStorage_ = rhs.outOfLineStorage_;
      rhs.outOfLineStorage_ = nullptr;
    }

    rhs.size_ = 0;
  }

  // Move from rhs. rhs.size() == 0 afterwards.
  SizesAndStrides& operator=(SizesAndStrides&& rhs) noexcept {
    if (this == &rhs) {
      return *this;
    }
    if (C10_LIKELY(rhs.isInline())) {
      if (C10_UNLIKELY(!isInline())) {
        // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
        free(outOfLineStorage_);
      }
      copyDataInline(rhs);
    } else {
      // They're outline. We're going to steal their vector.
      if (!isInline()) {
        // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
        free(outOfLineStorage_);
      }
      outOfLineStorage_ = rhs.outOfLineStorage_;
      rhs.outOfLineStorage_ = nullptr;
    }
    size_ = rhs.size_;
    rhs.size_ = 0;

    return *this;
  }

  size_t size() const noexcept {
    return size_;
  }

  const int64_t* sizes_data() const noexcept {
    if (C10_LIKELY(isInline())) {
      return &inlineStorage_[0];
    } else {
      return &outOfLineStorage_[0];
    }
  }

  int64_t* sizes_data() noexcept {
    if (C10_LIKELY(isInline())) {
      return &inlineStorage_[0];
    } else {
      return &outOfLineStorage_[0];
    }
  }

  sizes_const_iterator sizes_begin() const noexcept {
    return sizes_data();
  }

  sizes_iterator sizes_begin() noexcept {
    return sizes_data();
  }

  sizes_const_iterator sizes_end() const noexcept {
    return sizes_begin() + size();
  }

  sizes_iterator sizes_end() noexcept {
    return sizes_begin() + size();
  }

  IntArrayRef sizes_arrayref() const noexcept {
    return IntArrayRef{sizes_data(), size()};
  }

  void set_sizes(IntArrayRef newSizes) {
    resize(newSizes.size());
    std::copy(newSizes.begin(), newSizes.end(), sizes_begin());
  }

  void set_strides(IntArrayRef strides) {
    TORCH_INTERNAL_ASSERT(strides.size() == size());
    std::copy(strides.begin(), strides.end(), strides_begin());
  }

  const int64_t* strides_data() const noexcept {
    if (C10_LIKELY(isInline())) {
      return &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE];
    } else {
      return &outOfLineStorage_[size()];
    }
  }

  int64_t* strides_data() noexcept {
    if (C10_LIKELY(isInline())) {
      return &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE];
    } else {
      return &outOfLineStorage_[size()];
    }
  }

  strides_const_iterator strides_begin() const noexcept {
    if (C10_LIKELY(isInline())) {
      return &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE];
    } else {
      return &outOfLineStorage_[size()];
    }
  }

  strides_iterator strides_begin() noexcept {
    if (C10_LIKELY(isInline())) {
      return &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE];
    } else {
      return &outOfLineStorage_[size()];
    }
  }

  strides_const_iterator strides_end() const noexcept {
    return strides_begin() + size();
  }

  strides_iterator strides_end() noexcept {
    return strides_begin() + size();
  }

  IntArrayRef strides_arrayref() const noexcept {
    return IntArrayRef{strides_data(), size()};
  }

  // Size accessors.
  int64_t size_at(size_t idx) const noexcept {
    assert(idx < size());
    return sizes_data()[idx];
  }

  int64_t& size_at(size_t idx) noexcept {
    assert(idx < size());
    return sizes_data()[idx];
  }

  int64_t size_at_unchecked(size_t idx) const noexcept {
    return sizes_data()[idx];
  }

  int64_t& size_at_unchecked(size_t idx) noexcept {
    return sizes_data()[idx];
  }

  // Stride accessors.
  int64_t stride_at(size_t idx) const noexcept {
    assert(idx < size());
    return strides_data()[idx];
  }

  int64_t& stride_at(size_t idx) noexcept {
    assert(idx < size());
    return strides_data()[idx];
  }

  int64_t stride_at_unchecked(size_t idx) const noexcept {
    return strides_data()[idx];
  }

  int64_t& stride_at_unchecked(size_t idx) noexcept {
    return strides_data()[idx];
  }

  void resize(size_t newSize) {
    const auto oldSize = size();
    if (newSize == oldSize) {
      return;
    }
    if (C10_LIKELY(
            newSize <= C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE && isInline())) {
      if (oldSize < newSize) {
        const auto bytesToZero =
            (newSize - oldSize) * sizeof(inlineStorage_[0]);
        memset(&inlineStorage_[oldSize], 0, bytesToZero);
        memset(
            &inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE + oldSize],
            0,
            bytesToZero);
      }
      size_ = newSize;
    } else {
      resizeSlowPath(newSize, oldSize);
    }
  }

  void resizeSlowPath(size_t newSize, size_t oldSize);

 private:
  bool isInline() const noexcept {
    return size_ <= C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE;
  }

  void copyDataInline(const SizesAndStrides& rhs) {
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.isInline());
    memcpy(inlineStorage_, rhs.inlineStorage_, sizeof(inlineStorage_));
  }

  static size_t storageBytes(size_t size) noexcept {
    return size * 2 * sizeof(int64_t);
  }

  void allocateOutOfLineStorage(size_t size) {
    // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
    outOfLineStorage_ = static_cast<int64_t*>(malloc(storageBytes(size)));
    TORCH_CHECK(
        outOfLineStorage_,
        "Could not allocate memory for Tensor SizesAndStrides!");
  }

  void resizeOutOfLineStorage(size_t newSize) {
    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!isInline());
    outOfLineStorage_ = static_cast<int64_t*>(
        // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
        realloc(outOfLineStorage_, storageBytes(newSize)));
    TORCH_CHECK(
        outOfLineStorage_,
        "Could not allocate memory for Tensor SizesAndStrides!");
  }

  void copyDataOutline(const SizesAndStrides& rhs) noexcept {
    memcpy(outOfLineStorage_, rhs.outOfLineStorage_, storageBytes(rhs.size_));
  }

  size_t size_{1};
  union {
    int64_t* outOfLineStorage_;
    // NOLINTNEXTLINE(*c-array*)
    int64_t inlineStorage_[C10_SIZES_AND_STRIDES_MAX_INLINE_SIZE * 2]{};
  };
};

} // namespace c10::impl
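A sketch (not part of the diff) of the inline/out-of-line behavior the comments describe; the concrete shapes are illustrative:

#include <c10/core/impl/SizesAndStrides.h>

void sizes_and_strides_demo() {
  c10::impl::SizesAndStrides ss; // starts as rank 1: size 0, stride 1
  ss.set_sizes({2, 3, 4});       // rank 3 still fits in the inline storage
  ss.stride_at(0) = 12;          // strides always share the sizes' rank
  ss.stride_at(1) = 4;
  ss.stride_at(2) = 1;
  ss.set_sizes({1, 2, 3, 4, 5, 6}); // rank 6 (> 5) moves storage out of line
}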
venv/lib/python3.10/site-packages/torch/include/c10/core/impl/TorchDispatchModeTLS.h
ADDED
@@ -0,0 +1,63 @@
#pragma once

#include <c10/core/SafePyObject.h>
#include <c10/macros/Export.h>

namespace c10::impl {

enum class TorchDispatchModeKey : int8_t {
  FAKE,
  PROXY,
  FUNCTIONAL,
  NUM_MODE_KEYS
};

struct C10_API TorchDispatchModeTLS {
  // This API is NOT invariant safe.
  // It must not take in an infra mode that uses TorchDispatchModeKey.
  // If you're pushing an infra mode onto the stack, we expect
  // you to use set_mode.
  static void push_non_infra_mode_onto_stack(
      std::shared_ptr<SafePyObject> mode);
  // Pops the top mode of the stack, giving precedence to user modes
  // before attempting to pop any infra modes.
  static const std::shared_ptr<SafePyObject> pop_stack();
  // Returns the highest-priority infra mode on the stack,
  // along with its mode key.
  static const std::tuple<std::shared_ptr<SafePyObject>, TorchDispatchModeKey>
  pop_highest_infra_mode();

  static const std::shared_ptr<SafePyObject>& get_stack_at(int64_t idx);
  static int64_t stack_len();

  static const c10::optional<std::shared_ptr<SafePyObject>> get_mode(
      TorchDispatchModeKey mode_key);
  static const c10::optional<std::shared_ptr<SafePyObject>> unset_mode(
      TorchDispatchModeKey mode_key);
  static void set_mode(
      const std::shared_ptr<SafePyObject>& mode,
      TorchDispatchModeKey mode_key);

  static const TorchDispatchModeTLS& get_state();
  static void set_state(TorchDispatchModeTLS state);

  static bool any_modes_set(bool skip_infra_modes = false);

 private:
  std::vector<std::shared_ptr<c10::SafePyObject>> stack_;
  // Users are allowed to push multiple ProxyTorchDispatchMode objects onto
  // the stack. However, we only allow a single FakeTensorMode onto the
  // stack at a time (pushing additional FakeTensorModes onto the stack is a
  // no-op).
  std::array<
      c10::optional<std::shared_ptr<c10::SafePyObject>>,
      static_cast<size_t>(TorchDispatchModeKey::NUM_MODE_KEYS)>
      infra_modes_;
};

C10_API bool dispatch_mode_enabled();

C10_API std::string to_string(TorchDispatchModeKey mode_key);

} // namespace c10::impl
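The query-only half of this API can be exercised without any Python objects; a small sketch (the function name is illustrative):

#include <string>

#include <c10/core/impl/TorchDispatchModeTLS.h>

bool any_user_modes_active() {
  std::string key_name =
      c10::impl::to_string(c10::impl::TorchDispatchModeKey::FAKE);
  (void)key_name; // e.g. for logging
  // Ignore infra modes (FAKE/PROXY/FUNCTIONAL) and report user modes only.
  return c10::impl::TorchDispatchModeTLS::any_modes_set(
      /*skip_infra_modes=*/true);
}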
venv/lib/python3.10/site-packages/torch/include/c10/core/impl/VirtualGuardImpl.h
ADDED
@@ -0,0 +1,91 @@
#pragma once

#include <c10/core/impl/DeviceGuardImplInterface.h>

namespace c10::impl {

/**
 * An implementation of DeviceGuardImplInterface which delegates
 * to virtual dispatch on the DeviceGuardImpl registry.
 */
class VirtualGuardImpl final : public DeviceGuardImplInterface {
 public:
  VirtualGuardImpl(DeviceType device_type)
      : impl_(getDeviceGuardImpl(device_type)) {}
  // This constructor exists purely for testing
  VirtualGuardImpl(const DeviceGuardImplInterface* impl) : impl_(impl) {}

  // Copying and moving is OK!
  VirtualGuardImpl(const VirtualGuardImpl&) = default;
  VirtualGuardImpl& operator=(const VirtualGuardImpl&) = default;
  VirtualGuardImpl(VirtualGuardImpl&&) noexcept = default;
  VirtualGuardImpl& operator=(VirtualGuardImpl&&) noexcept = default;

  DeviceType type() const override {
    return impl_->type();
  }
  Device exchangeDevice(Device d) const override {
    return impl_->exchangeDevice(d);
  }
  Device getDevice() const override {
    return impl_->getDevice();
  }
  void setDevice(Device d) const override {
    impl_->setDevice(d);
  }
  void uncheckedSetDevice(Device d) const noexcept override {
    impl_->uncheckedSetDevice(d);
  }
  Stream getStream(Device d) const noexcept override {
    return impl_->getStream(d);
  }
  Stream getDefaultStream(Device d) const override {
    return impl_->getDefaultStream(d);
  }
  Stream getStreamFromGlobalPool(Device d, bool isHighPriority = false)
      const override {
    return impl_->getStreamFromGlobalPool(d, isHighPriority);
  }
  Stream exchangeStream(Stream s) const noexcept override {
    return impl_->exchangeStream(s);
  }
  DeviceIndex deviceCount() const noexcept override {
    return impl_->deviceCount();
  }

  // Event functions
  void record(
      void** event,
      const Stream& stream,
      const DeviceIndex device_index,
      const EventFlag flag) const override {
    impl_->record(event, stream, device_index, flag);
  }
  void block(void* event, const Stream& stream) const override {
    impl_->block(event, stream);
  }
  bool queryEvent(void* event) const override {
    return impl_->queryEvent(event);
  }
  void destroyEvent(void* event, const DeviceIndex device_index)
      const noexcept override {
    impl_->destroyEvent(event, device_index);
  }

  bool queryStream(const Stream& stream) const override {
    return impl_->queryStream(stream);
  }
  void synchronizeStream(const Stream& stream) const override {
    impl_->synchronizeStream(stream);
  }

  void recordDataPtrOnStream(const c10::DataPtr& data_ptr, const Stream& stream)
      const override {
    impl_->recordDataPtrOnStream(data_ptr, stream);
  }

 private:
  const DeviceGuardImplInterface* impl_ = nullptr;
};

} // namespace c10::impl
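A sketch of device-type-generic switching through this class, assuming a build where a DeviceGuardImpl is registered for CUDA and device 1 exists:

#include <c10/core/impl/VirtualGuardImpl.h>

void run_on_device_one() {
  c10::impl::VirtualGuardImpl impl(c10::DeviceType::CUDA);
  c10::Device prev =
      impl.exchangeDevice(c10::Device(c10::DeviceType::CUDA, 1));
  // ... launch work on device 1 ...
  impl.setDevice(prev); // restore the previous device
}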
venv/lib/python3.10/site-packages/torch/include/c10/core/impl/alloc_cpu.h
ADDED
@@ -0,0 +1,12 @@
#pragma once

#include <c10/macros/Export.h>

#include <cstddef>

namespace c10 {

C10_API void* alloc_cpu(size_t nbytes);
C10_API void free_cpu(void* data);

} // namespace c10
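Minimal pairing of the two functions above (the buffer size is illustrative):

#include <c10/core/impl/alloc_cpu.h>

void scratch_buffer_demo() {
  // alloc_cpu returns suitably aligned memory; release it with free_cpu,
  // not ::free.
  void* buf = c10::alloc_cpu(1024 * sizeof(float));
  // ... fill and use buf ...
  c10::free_cpu(buf);
}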
venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUCachingAllocator.h
ADDED
@@ -0,0 +1,20 @@
#pragma once

#include <c10/core/Allocator.h>
#include <c10/xpu/XPUStream.h>

namespace c10::xpu::XPUCachingAllocator {

C10_XPU_API Allocator* get();

C10_XPU_API void init(DeviceIndex device_count);

C10_XPU_API void emptyCache();

C10_XPU_API void* raw_alloc(size_t size);

C10_XPU_API void raw_delete(void* ptr);

C10_XPU_API void recordStream(const DataPtr& dataPtr, XPUStream stream);

} // namespace c10::xpu::XPUCachingAllocator
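A sketch of the raw allocation path, assuming an XPU runtime is available and the allocator has been initialized by the backend:

#include <c10/xpu/XPUCachingAllocator.h>

void raw_alloc_demo() {
  void* p = c10::xpu::XPUCachingAllocator::raw_alloc(4096);
  // ... use p on the current XPU device ...
  c10::xpu::XPUCachingAllocator::raw_delete(p); // returns p to the cache
}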
venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUDeviceProp.h
ADDED
@@ -0,0 +1,155 @@
#pragma once

#include <c10/xpu/XPUMacros.h>
#include <sycl/sycl.hpp>

namespace c10::xpu {

#define AT_FORALL_XPU_DEVICE_PROPERTIES(_) \
  /* the device name of this SYCL device. */ \
  _(name) \
  \
  /* the device type associated with the device. */ \
  _(device_type) \
  \
  /* the vendor of this SYCL device. */ \
  _(vendor) \
  \
  /* a backend-defined driver version as a std::string. */ \
  _(driver_version) \
  \
  /* the SYCL version as a std::string in the form <major>.<minor> */ \
  _(version) \
  \
  /* true if the SYCL device is available. Otherwise, return false. */ \
  _(is_available) \
  \
  /* the maximum size in bytes of the arguments that can be passed to a \
   * kernel. */ \
  _(max_parameter_size) \
  \
  /* the number of parallel compute units available to the device. */ \
  _(max_compute_units) \
  \
  /* the maximum dimensions that specify the global and local work-item IDs \
   * used by the data parallel execution model. */ \
  _(max_work_item_dimensions) \
  \
  /* the maximum number of work-items that are permitted in a work-group \
   * executing a kernel on a single compute unit. */ \
  _(max_work_group_size) \
  \
  /* the maximum number of subgroups in a work-group for any kernel executed \
   * on the device. */ \
  _(max_num_sub_groups) \
  \
  /* a std::vector of size_t containing the set of sub-group sizes supported \
   * by the device. */ \
  _(sub_group_sizes) \
  \
  /* the maximum configured clock frequency of this SYCL device in MHz. */ \
  _(max_clock_frequency) \
  \
  /* the default compute device address space size specified as an unsigned \
   * integer value in bits. Must return either 32 or 64. */ \
  _(address_bits) \
  \
  /* the maximum size of memory object allocation in bytes. */ \
  _(max_mem_alloc_size) \
  \
  /* the minimum value in bits of the largest supported SYCL built-in data \
   * type if this SYCL device is not of device type \
   * sycl::info::device_type::custom. */ \
  _(mem_base_addr_align) \
  \
  /* a std::vector of info::fp_config describing the half/single/double \
   * precision floating-point capability of this SYCL device. */ \
  _(half_fp_config) \
  _(single_fp_config) \
  _(double_fp_config) \
  \
  /* the size of global device memory in bytes. */ \
  _(global_mem_size) \
  \
  /* the type of global memory cache supported. */ \
  _(global_mem_cache_type) \
  \
  /* the size of global memory cache in bytes. */ \
  _(global_mem_cache_size) \
  \
  /* the size of global memory cache line in bytes. */ \
  _(global_mem_cache_line_size) \
  \
  /* the type of local memory supported. */ \
  _(local_mem_type) \
  \
  /* the size of local memory arena in bytes. */ \
  _(local_mem_size) \
  \
  /* the maximum number of sub-devices that can be created when this device \
   * is partitioned. */ \
  _(partition_max_sub_devices) \
  \
  /* the resolution of device timer in nanoseconds. */ \
  _(profiling_timer_resolution) \
  \
  /* the preferred native vector width size for built-in scalar types that \
   * can be put into vectors. */ \
  _(preferred_vector_width_char) \
  _(preferred_vector_width_short) \
  _(preferred_vector_width_int) \
  _(preferred_vector_width_long) \
  _(preferred_vector_width_float) \
  _(preferred_vector_width_double) \
  _(preferred_vector_width_half) \
  \
  /* the native ISA vector width. The vector width is defined as the number \
   * of scalar elements that can be stored in the vector. */ \
  _(native_vector_width_char) \
  _(native_vector_width_short) \
  _(native_vector_width_int) \
  _(native_vector_width_long) \
  _(native_vector_width_float) \
  _(native_vector_width_double) \
  _(native_vector_width_half)

#define AT_FORALL_XPU_EXT_DEVICE_PROPERTIES(_) \
  /* the number of EUs associated with the Intel GPU. */ \
  _(gpu_eu_count, 512) \
  \
  /* the number of EUs in a subslice. */ \
  _(gpu_eu_count_per_subslice, 8) \
  \
  /* the simd width of EU of GPU. */ \
  _(gpu_eu_simd_width, 8) \
  \
  /* the number of hardware threads per EU of GPU. */ \
  _(gpu_hw_threads_per_eu, 8)

#define _DEFINE_SYCL_PROP(ns, property, member) \
  ns::property::return_type member;

#define DEFINE_DEVICE_PROP(property) \
  _DEFINE_SYCL_PROP(sycl::info::device, property, property)

#define DEFINE_PLATFORM_PROP(property, member) \
  _DEFINE_SYCL_PROP(sycl::info::platform, property, member)

#define DEFINE_EXT_DEVICE_PROP(property, ...) \
  _DEFINE_SYCL_PROP(sycl::ext::intel::info::device, property, property)

struct C10_XPU_API DeviceProp {
  AT_FORALL_XPU_DEVICE_PROPERTIES(DEFINE_DEVICE_PROP);

  // the platform name.
  DEFINE_PLATFORM_PROP(name, platform_name);

  AT_FORALL_XPU_EXT_DEVICE_PROPERTIES(DEFINE_EXT_DEVICE_PROP)
};

#undef _DEFINE_SYCL_PROP
#undef DEFINE_DEVICE_PROP
#undef DEFINE_PLATFORM_PROP
#undef DEFINE_EXT_DEVICE_PROP

} // namespace c10::xpu
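To make the X-macro concrete, an approximate view of what one entry generates (comment-only; the exact expansion follows _DEFINE_SYCL_PROP above):

// DEFINE_DEVICE_PROP(name) expands, roughly, to
//   sycl::info::device::name::return_type name;
// so DeviceProp gains one member per property listed above, typed by the
// corresponding SYCL info descriptor's return_type.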
venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUException.h
ADDED
@@ -0,0 +1,22 @@
#pragma once

#include <c10/util/Exception.h>
#include <sycl/sycl.hpp>

namespace c10::xpu {

static inline sycl::async_handler asyncHandler = [](sycl::exception_list el) {
  if (el.size() == 0) {
    return;
  }
  for (const auto& e : el) {
    try {
      std::rethrow_exception(e);
    } catch (sycl::exception& e) {
      TORCH_WARN("SYCL Exception: ", e.what());
    }
  }
  throw;
};

} // namespace c10::xpu
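A sketch of attaching the handler when building a queue, so asynchronous SYCL errors are logged before the trailing rethrow terminates (device selection is illustrative):

#include <sycl/sycl.hpp>

#include <c10/xpu/XPUException.h>

sycl::queue make_logged_queue(const sycl::device& dev) {
  return sycl::queue{dev, c10::xpu::asyncHandler};
}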
venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUFunctions.h
ADDED
@@ -0,0 +1,35 @@
#pragma once

#include <c10/core/Device.h>
#include <c10/xpu/XPUDeviceProp.h>
#include <c10/xpu/XPUMacros.h>

// The naming convention used here matches the naming convention of torch.xpu

namespace c10::xpu {

// Log a warning only once if no devices are detected.
C10_XPU_API DeviceIndex device_count();

// Throws an error if no devices are detected.
C10_XPU_API DeviceIndex device_count_ensure_non_zero();

C10_XPU_API DeviceIndex current_device();

C10_XPU_API void set_device(DeviceIndex device);

C10_XPU_API DeviceIndex exchange_device(DeviceIndex device);

C10_XPU_API DeviceIndex maybe_exchange_device(DeviceIndex to_device);

C10_XPU_API sycl::device& get_raw_device(DeviceIndex device);

C10_XPU_API sycl::context& get_device_context();

C10_XPU_API void get_device_properties(
    DeviceProp* device_prop,
    DeviceIndex device);

C10_XPU_API DeviceIndex get_device_idx_from_pointer(void* ptr);

} // namespace c10::xpu
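A sketch combining a few of these calls (the printing is illustrative):

#include <iostream>

#include <c10/xpu/XPUFunctions.h>

void print_first_device() {
  if (c10::xpu::device_count() == 0) {
    return; // no XPU devices detected
  }
  c10::xpu::DeviceProp prop;
  c10::xpu::get_device_properties(&prop, /*device=*/0);
  std::cout << prop.name << " (" << prop.gpu_eu_count << " EUs)\n";
}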
venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUMacros.h
ADDED
@@ -0,0 +1,19 @@
#pragma once

// See c10/macros/Export.h for a detailed explanation of what the function
// of these macros is. We need one set of macros for every separate library
// we build.

#if defined(__GNUC__)
#define C10_XPU_EXPORT __attribute__((__visibility__("default")))
#else // defined(__GNUC__)
#define C10_XPU_EXPORT
#endif // defined(__GNUC__)
#define C10_XPU_IMPORT C10_XPU_EXPORT

// This one is being used by libc10_xpu.so
#ifdef C10_XPU_BUILD_MAIN_LIB
#define C10_XPU_API C10_XPU_EXPORT
#else
#define C10_XPU_API C10_XPU_IMPORT
#endif
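Usage is the standard export-macro pattern; a one-line illustrative declaration (the function itself is hypothetical):

#include <c10/xpu/XPUMacros.h>

// Exported when built with C10_XPU_BUILD_MAIN_LIB, imported otherwise.
C10_XPU_API void hypothetical_xpu_helper();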
venv/lib/python3.10/site-packages/torch/include/c10/xpu/XPUStream.h
ADDED
@@ -0,0 +1,162 @@
1 |
+
#pragma once
|
2 |
+
|
3 |
+
#include <c10/core/Stream.h>
|
4 |
+
#include <c10/xpu/XPUFunctions.h>
|
5 |
+
|
6 |
+
namespace c10::xpu {
|
7 |
+
|
8 |
+
/*
|
9 |
+
* Note [Stream Management]
|
10 |
+
*
|
11 |
+
* An XPUStream is an abstraction of an actual SYCL queue in which SYCL kernel
|
12 |
+
* can execute. Currently, there are several pools per device to manage SYCL
|
13 |
+
* queue, and a device's pool is lazily created.
|
14 |
+
*
|
15 |
+
* There are two pools per device. The first pool contains "normal priority"
|
16 |
+
* queues. The second pool is the "high priority" queues. There are 32 queues in
|
17 |
+
* per pool per device, and when a queue is requested one of these queues is
|
18 |
+
* returned round-robin. That is, the first queue requested is at index 0, the
|
19 |
+
* second at index 1... to index 31, then index 0 again.
|
20 |
+
*
|
21 |
+
* This means that if 33 queues are requested, the first and last queues
|
22 |
+
* requested are actually the same queue (under the covers) and kernels enqueued
|
23 |
+
* on them cannot run concurrently.
|
24 |
+
*
|
25 |
+
* It is safe to enqueue a kernel on the same queue from two different
|
26 |
+
* threads as the SYCL specification described.
|
27 |
+
*/
|
28 |
+
|
29 |
+
static constexpr int max_compile_time_stream_priorities = 2;
|
30 |
+
|
31 |
+
/*
|
32 |
+
* This serves as a wrapper around c10::Stream and acts as a representation for
|
33 |
+
* a SYCL queue. On each device, a SYCL queue pool consists of kStreamsPerPool
|
34 |
+
* queues, and you can access a particular queue by its index. The index is
|
35 |
+
* extracted from XPUStream.id().
|
36 |
+
*/
|
37 |
+
class C10_XPU_API XPUStream {
|
38 |
+
public:
|
39 |
+
enum Unchecked { UNCHECKED };
|
40 |
+
|
41 |
+
// Construct a XPUStream from a Stream. This construction is checked, and
|
42 |
+
// will raise an error if the Stream is not, in fact, a XPU stream.
|
43 |
+
explicit XPUStream(Stream stream) : stream_(stream) {
|
44 |
+
TORCH_CHECK(stream_.device_type() == DeviceType::XPU);
|
45 |
+
}
|
46 |
+
|
47 |
+
// Construct a XPUStream from a Stream with no error checking.
|
48 |
+
explicit XPUStream(Unchecked, Stream stream) : stream_(stream) {}
|
49 |
+
|
50 |
+
bool operator==(const XPUStream& other) const noexcept {
|
51 |
+
return unwrap() == other.unwrap();
|
52 |
+
}
|
53 |
+
|
54 |
+
bool operator!=(const XPUStream& other) const noexcept {
|
55 |
+
return unwrap() != other.unwrap();
|
56 |
+
}
|
57 |
+
|
58 |
+
operator sycl::queue&() const {
|
59 |
+
return queue();
|
60 |
+
}
|
61 |
+
|
62 |
+
operator Stream() const {
|
63 |
+
return unwrap();
|
64 |
+
}
|
65 |
+
|
66 |
+
DeviceType device_type() const {
|
67 |
+
return DeviceType::XPU;
|
68 |
+
}
|
69 |
+
|
70 |
+
DeviceIndex device_index() const {
|
71 |
+
return stream_.device_index();
|
72 |
+
}
|
73 |
+
|
74 |
+
Device device() const {
|
75 |
+
return Device(DeviceType::XPU, device_index());
|
76 |
+
}
|
77 |
+
|
78 |
+
// Return the stream ID corresponding to this particular stream. StreamId is
|
79 |
+
/// a int64_t representation generated by its type and index.
|
80 |
+
StreamId id() const {
|
81 |
+
return stream_.id();
|
82 |
+
}
|
83 |
+
|
84 |
+
bool query() const {
|
85 |
+
return queue().ext_oneapi_empty();
|
86 |
+
}
|
87 |
+
|
88 |
+
void synchronize() const {
|
89 |
+
queue().wait_and_throw();
|
90 |
+
}
|
91 |
+
|
92 |
+
int priority() const;
|
93 |
+
|
94 |
+
// Explicit conversion to sycl::queue&.
|
95 |
+
sycl::queue& queue() const;
|
96 |
+
|
97 |
+
Stream unwrap() const {
|
98 |
+
return stream_;
|
99 |
+
}
|
100 |
+
|
101 |
+
struct c10::StreamData3 pack3() const {
|
102 |
+
return stream_.pack3();
|
103 |
+
}
|
104 |
+
|
105 |
+
static XPUStream unpack3(
|
106 |
+
StreamId stream_id,
|
107 |
+
DeviceIndex device_index,
|
108 |
+
DeviceType device_type) {
|
109 |
+
return XPUStream(Stream::unpack3(stream_id, device_index, device_type));
|
110 |
+
}
|
111 |
+
|
112 |
+
static std::tuple<int, int> priority_range() {
|
113 |
+
return std::make_tuple(0, -max_compile_time_stream_priorities + 1);
|
114 |
+
}
|
115 |
+
|
116 |
+
private:
|
117 |
+
Stream stream_;
|
118 |
+
};
|
119 |
+
|
120 |
+
/**
|
121 |
+
* Get a stream from the pool in a round-robin fashion.
|
122 |
+
*
|
123 |
+
* You can request a stream from the high priority pool by setting
|
124 |
+
* isHighPriority to true, or a priority value for a specific device by setting
|
125 |
+
* device.
|
126 |
+
*/
|
127 |
+
C10_XPU_API XPUStream
|
128 |
+
getStreamFromPool(const bool isHighPriority = false, DeviceIndex device = -1);
|
129 |
+
// The priority number lower, the priority higher.
|
130 |
+
C10_XPU_API XPUStream
|
131 |
+
getStreamFromPool(const int priority, DeviceIndex device = -1);
|
132 |
+
|
133 |
+
/**
|
134 |
+
* Get the current XPU stream, for the passed XPU device, or for the current
|
135 |
+
* device if no device index is passed.
|
136 |
+
*/
|
137 |
+
C10_XPU_API XPUStream getCurrentXPUStream(DeviceIndex device = -1);
|
138 |
+
|
139 |
+
/**
|
140 |
+
* Set the current stream on the device of the passed in stream to be the passed
|
141 |
+
* in stream.
|
142 |
+
*/
|
143 |
+
C10_XPU_API void setCurrentXPUStream(XPUStream stream);
|
144 |
+
|
145 |
+
C10_XPU_API std::ostream& operator<<(std::ostream& stream, const XPUStream& s);
|
146 |
+
|
147 |
+
/**
|
148 |
+
* Block all reserved SYCL queues in the stream pools on the device, and wait
|
149 |
+
* for their synchronizations.
|
150 |
+
*/
|
151 |
+
C10_XPU_API void syncStreamsOnDevice(DeviceIndex device = -1);
|
152 |
+
|
153 |
+
} // namespace c10::xpu
|
154 |
+
|
155 |
+
namespace std {
|
156 |
+
template <>
|
157 |
+
struct hash<c10::xpu::XPUStream> {
|
158 |
+
size_t operator()(c10::xpu::XPUStream s) const noexcept {
|
159 |
+
return std::hash<c10::Stream>{}(s.unwrap());
|
160 |
+
}
|
161 |
+
};
|
162 |
+
} // namespace std
|
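
The round-robin wrap-around described in Note [Stream Management] can be made concrete. The sketch below assumes a single thread is drawing from the pool (the pool cursor is shared, so concurrent callers would interleave) and that device 0 exists; `demo_round_robin` is a hypothetical function.

// Illustrative sketch, not part of the diff: with 32 streams per pool, the
// 33rd request after a given one returns the same underlying SYCL queue.
#include <c10/util/Exception.h>
#include <c10/xpu/XPUStream.h>

void demo_round_robin() {
  auto first = c10::xpu::getStreamFromPool(/*isHighPriority=*/false, /*device=*/0);
  for (int i = 0; i < 31; ++i) {
    (void)c10::xpu::getStreamFromPool(false, 0); // consume the rest of the pool
  }
  auto wrapped = c10::xpu::getStreamFromPool(false, 0); // wraps around
  TORCH_INTERNAL_ASSERT(wrapped == first); // same queue after 32 requests
}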
venv/lib/python3.10/site-packages/torch/include/c10/xpu/impl/XPUGuardImpl.h
ADDED
@@ -0,0 +1,125 @@
+#pragma once
+
+#include <c10/core/DeviceGuard.h>
+#include <c10/core/impl/DeviceGuardImplInterface.h>
+#include <c10/xpu/XPUCachingAllocator.h>
+#include <c10/xpu/XPUFunctions.h>
+#include <c10/xpu/XPUStream.h>
+
+#include <vector>
+
+namespace c10::xpu::impl {
+
+struct XPUGuardImpl final : public c10::impl::DeviceGuardImplInterface {
+  static constexpr DeviceType static_type = kXPU;
+
+  XPUGuardImpl() = default;
+
+  explicit XPUGuardImpl(DeviceType t) {
+    TORCH_INTERNAL_ASSERT(t == kXPU);
+  }
+
+  DeviceType type() const override {
+    return kXPU;
+  }
+
+  Device exchangeDevice(Device d) const override {
+    TORCH_INTERNAL_ASSERT(d.is_xpu());
+    const auto old_device_index = c10::xpu::exchange_device(d.index());
+    return Device(kXPU, old_device_index);
+  }
+
+  Device getDevice() const override {
+    const auto device = c10::xpu::current_device();
+    return Device(kXPU, device);
+  }
+
+  void setDevice(Device d) const override {
+    TORCH_INTERNAL_ASSERT(d.is_xpu());
+    c10::xpu::set_device(d.index());
+  }
+
+  void uncheckedSetDevice(Device d) const noexcept override {
+    c10::xpu::set_device(d.index());
+  }
+
+  Stream getStream(Device d) const noexcept override {
+    return getCurrentXPUStream(d.index()).unwrap();
+  }
+
+  Stream getStreamFromGlobalPool(Device d, bool isHighPriority = false)
+      const override {
+    return getStreamFromPool(isHighPriority, d.index());
+  }
+
+  // NB: These do NOT set the current device
+  Stream exchangeStream(Stream s) const noexcept override {
+    const XPUStream stream(s);
+    const auto old_stream = getCurrentXPUStream(s.device().index());
+    setCurrentXPUStream(stream);
+    return old_stream.unwrap();
+  }
+
+  DeviceIndex deviceCount() const noexcept override {
+    return c10::xpu::device_count();
+  }
+
+  // Event-related functions
+  void destroyEvent(void* event, const DeviceIndex device_index)
+      const noexcept override {}
+
+  void record(
+      void** event,
+      const Stream& stream,
+      const DeviceIndex device_index,
+      const EventFlag flag) const override {
+    TORCH_CHECK(
+        device_index == -1 || device_index == stream.device_index(),
+        "Event device index ",
+        device_index,
+        " does not match recording stream's device index ",
+        stream.device_index(),
+        ".");
+
+    auto* xpu_event = reinterpret_cast<sycl::event*>(*event);
+    const XPUStream xpu_stream{stream};
+    *xpu_event = xpu_stream.queue().ext_oneapi_submit_barrier();
+  }
+
+  void block(void* event, const Stream& stream) const override {
+    if (!event)
+      return;
+    auto* xpu_event = reinterpret_cast<sycl::event*>(event);
+    std::vector<sycl::event> event_list{*xpu_event};
+    const XPUStream xpu_stream(stream);
+    xpu_stream.queue().ext_oneapi_submit_barrier(event_list);
+  }
+
+  bool queryEvent(void* event) const override {
+    using namespace sycl::info;
+    if (!event)
+      return true;
+    auto* xpu_event = reinterpret_cast<sycl::event*>(event);
+    return xpu_event->get_info<event::command_execution_status>() ==
+        event_command_status::complete;
+  }
+
+  // Stream-related functions
+  bool queryStream(const Stream& stream) const override {
+    const XPUStream xpu_stream{stream};
+    return xpu_stream.query();
+  }
+
+  void synchronizeStream(const Stream& stream) const override {
+    const XPUStream xpu_stream{stream};
+    xpu_stream.synchronize();
+  }
+
+  void recordDataPtrOnStream(const c10::DataPtr& data_ptr, const Stream& stream)
+      const override {
+    const XPUStream xpu_stream{stream};
+    XPUCachingAllocator::recordStream(data_ptr, xpu_stream);
+  }
+};
+
+} // namespace c10::xpu::impl
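
The record/block pair above maps events onto Intel's oneAPI barrier extension rather than onto dedicated event objects. The same cross-queue ordering pattern in plain SYCL, as a sketch (the function and queue names are illustrative):

// Illustrative sketch, not part of the diff: ext_oneapi_submit_barrier() with
// no arguments returns an event marking everything submitted to the queue so
// far ("record"); the overload taking an event list makes a queue's future
// work wait on those events ("block").
#include <sycl/sycl.hpp>
#include <vector>

void cross_queue_sync(sycl::queue& producer, sycl::queue& consumer) {
  sycl::event ev = producer.ext_oneapi_submit_barrier();
  consumer.ext_oneapi_submit_barrier(std::vector<sycl::event>{ev});
  // Work submitted to `consumer` after this point runs only once everything
  // previously submitted to `producer` has completed.
}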
venv/lib/python3.10/site-packages/torch/include/pybind11/attr.h
ADDED
@@ -0,0 +1,690 @@
+/*
+    pybind11/attr.h: Infrastructure for processing custom
+    type and function attributes
+
+    Copyright (c) 2016 Wenzel Jakob <[email protected]>
+
+    All rights reserved. Use of this source code is governed by a
+    BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "detail/common.h"
+#include "cast.h"
+
+#include <functional>
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+
+/// \addtogroup annotations
+/// @{
+
+/// Annotation for methods
+struct is_method {
+    handle class_;
+    explicit is_method(const handle &c) : class_(c) {}
+};
+
+/// Annotation for setters
+struct is_setter {};
+
+/// Annotation for operators
+struct is_operator {};
+
+/// Annotation for classes that cannot be subclassed
+struct is_final {};
+
+/// Annotation for parent scope
+struct scope {
+    handle value;
+    explicit scope(const handle &s) : value(s) {}
+};
+
+/// Annotation for documentation
+struct doc {
+    const char *value;
+    explicit doc(const char *value) : value(value) {}
+};
+
+/// Annotation for function names
+struct name {
+    const char *value;
+    explicit name(const char *value) : value(value) {}
+};
+
+/// Annotation indicating that a function is an overload associated with a given "sibling"
+struct sibling {
+    handle value;
+    explicit sibling(const handle &value) : value(value.ptr()) {}
+};
+
+/// Annotation indicating that a class derives from another given type
+template <typename T>
+struct base {
+
+    PYBIND11_DEPRECATED(
+        "base<T>() was deprecated in favor of specifying 'T' as a template argument to class_")
+    base() = default;
+};
+
+/// Keep patient alive while nurse lives
+template <size_t Nurse, size_t Patient>
+struct keep_alive {};
+
+/// Annotation indicating that a class is involved in a multiple inheritance relationship
+struct multiple_inheritance {};
+
+/// Annotation which enables dynamic attributes, i.e. adds `__dict__` to a class
+struct dynamic_attr {};
+
+/// Annotation which enables the buffer protocol for a type
+struct buffer_protocol {};
+
+/// Annotation which requests that a special metaclass is created for a type
+struct metaclass {
+    handle value;
+
+    PYBIND11_DEPRECATED("py::metaclass() is no longer required. It's turned on by default now.")
+    metaclass() = default;
+
+    /// Override pybind11's default metaclass
+    explicit metaclass(handle value) : value(value) {}
+};
+
+/// Specifies a custom callback with signature `void (PyHeapTypeObject*)` that
+/// may be used to customize the Python type.
+///
+/// The callback is invoked immediately before `PyType_Ready`.
+///
+/// Note: This is an advanced interface, and uses of it may require changes to
+/// work with later versions of pybind11. You may wish to consult the
+/// implementation of `make_new_python_type` in `detail/classes.h` to understand
+/// the context in which the callback will be run.
+struct custom_type_setup {
+    using callback = std::function<void(PyHeapTypeObject *heap_type)>;
+
+    explicit custom_type_setup(callback value) : value(std::move(value)) {}
+
+    callback value;
+};
+
+/// Annotation that marks a class as local to the module:
+struct module_local {
+    const bool value;
+    constexpr explicit module_local(bool v = true) : value(v) {}
+};
+
+/// Annotation to mark enums as an arithmetic type
+struct arithmetic {};
+
+/// Mark a function for addition at the beginning of the existing overload chain instead of the end
+struct prepend {};
+
+/** \rst
+    A call policy which places one or more guard variables (``Ts...``) around the function call.
+
+    For example, this definition:
+
+    .. code-block:: cpp
+
+        m.def("foo", foo, py::call_guard<T>());
+
+    is equivalent to the following pseudocode:
+
+    .. code-block:: cpp
+
+        m.def("foo", [](args...) {
+            T scope_guard;
+            return foo(args...); // forwarded arguments
+        });
+ \endrst */
+template <typename... Ts>
+struct call_guard;
+
+template <>
+struct call_guard<> {
+    using type = detail::void_type;
+};
+
+template <typename T>
+struct call_guard<T> {
+    static_assert(std::is_default_constructible<T>::value,
+                  "The guard type must be default constructible");
+
+    using type = T;
+};
+
+template <typename T, typename... Ts>
+struct call_guard<T, Ts...> {
+    struct type {
+        T guard{}; // Compose multiple guard types with left-to-right default-constructor order
+        typename call_guard<Ts...>::type next{};
+    };
+};
+
+/// @} annotations
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+/* Forward declarations */
+enum op_id : int;
+enum op_type : int;
+struct undefined_t;
+template <op_id id, op_type ot, typename L = undefined_t, typename R = undefined_t>
+struct op_;
+void keep_alive_impl(size_t Nurse, size_t Patient, function_call &call, handle ret);
+
+/// Internal data structure which holds metadata about a keyword argument
+struct argument_record {
+    const char *name;  ///< Argument name
+    const char *descr; ///< Human-readable version of the argument value
+    handle value;      ///< Associated Python object
+    bool convert : 1;  ///< True if the argument is allowed to convert when loading
+    bool none : 1;     ///< True if None is allowed when loading
+
+    argument_record(const char *name, const char *descr, handle value, bool convert, bool none)
+        : name(name), descr(descr), value(value), convert(convert), none(none) {}
+};
+
+/// Internal data structure which holds metadata about a bound function (signature, overloads,
+/// etc.)
+struct function_record {
+    function_record()
+        : is_constructor(false), is_new_style_constructor(false), is_stateless(false),
+          is_operator(false), is_method(false), is_setter(false), has_args(false),
+          has_kwargs(false), prepend(false) {}
+
+    /// Function name
+    char *name = nullptr; /* why no C++ strings? They generate heavier code.. */
+
+    // User-specified documentation string
+    char *doc = nullptr;
+
+    /// Human-readable version of the function signature
+    char *signature = nullptr;
+
+    /// List of registered keyword arguments
+    std::vector<argument_record> args;
+
+    /// Pointer to lambda function which converts arguments and performs the actual call
+    handle (*impl)(function_call &) = nullptr;
+
+    /// Storage for the wrapped function pointer and captured data, if any
+    void *data[3] = {};
+
+    /// Pointer to custom destructor for 'data' (if needed)
+    void (*free_data)(function_record *ptr) = nullptr;
+
+    /// Return value policy associated with this function
+    return_value_policy policy = return_value_policy::automatic;
+
+    /// True if name == '__init__'
+    bool is_constructor : 1;
+
+    /// True if this is a new-style `__init__` defined in `detail/init.h`
+    bool is_new_style_constructor : 1;
+
+    /// True if this is a stateless function pointer
+    bool is_stateless : 1;
+
+    /// True if this is an operator (__add__), etc.
+    bool is_operator : 1;
+
+    /// True if this is a method
+    bool is_method : 1;
+
+    /// True if this is a setter
+    bool is_setter : 1;
+
+    /// True if the function has a '*args' argument
+    bool has_args : 1;
+
+    /// True if the function has a '**kwargs' argument
+    bool has_kwargs : 1;
+
+    /// True if this function is to be inserted at the beginning of the overload resolution chain
+    bool prepend : 1;
+
+    /// Number of arguments (including py::args and/or py::kwargs, if present)
+    std::uint16_t nargs;
+
+    /// Number of leading positional arguments, which are terminated by a py::args or py::kwargs
+    /// argument or by a py::kw_only annotation.
+    std::uint16_t nargs_pos = 0;
+
+    /// Number of leading arguments (counted in `nargs`) that are positional-only
+    std::uint16_t nargs_pos_only = 0;
+
+    /// Python method object
+    PyMethodDef *def = nullptr;
+
+    /// Python handle to the parent scope (a class or a module)
+    handle scope;
+
+    /// Python handle to the sibling function representing an overload chain
+    handle sibling;
+
+    /// Pointer to next overload
+    function_record *next = nullptr;
+};
+
+/// Special data structure which (temporarily) holds metadata about a bound class
+struct type_record {
+    PYBIND11_NOINLINE type_record()
+        : multiple_inheritance(false), dynamic_attr(false), buffer_protocol(false),
+          default_holder(true), module_local(false), is_final(false) {}
+
+    /// Handle to the parent scope
+    handle scope;
+
+    /// Name of the class
+    const char *name = nullptr;
+
+    // Pointer to RTTI type_info data structure
+    const std::type_info *type = nullptr;
+
+    /// How large is the underlying C++ type?
+    size_t type_size = 0;
+
+    /// What is the alignment of the underlying C++ type?
+    size_t type_align = 0;
+
+    /// How large is the type's holder?
+    size_t holder_size = 0;
+
+    /// The global operator new can be overridden with a class-specific variant
+    void *(*operator_new)(size_t) = nullptr;
+
+    /// Function pointer to class_<..>::init_instance
+    void (*init_instance)(instance *, const void *) = nullptr;
+
+    /// Function pointer to class_<..>::dealloc
+    void (*dealloc)(detail::value_and_holder &) = nullptr;
+
+    /// List of base classes of the newly created type
+    list bases;
+
+    /// Optional docstring
+    const char *doc = nullptr;
+
+    /// Custom metaclass (optional)
+    handle metaclass;
+
+    /// Custom type setup.
+    custom_type_setup::callback custom_type_setup_callback;
+
+    /// Multiple inheritance marker
+    bool multiple_inheritance : 1;
+
+    /// Does the class manage a __dict__?
+    bool dynamic_attr : 1;
+
+    /// Does the class implement the buffer protocol?
+    bool buffer_protocol : 1;
+
+    /// Is the default (unique_ptr) holder type used?
+    bool default_holder : 1;
+
+    /// Is the class definition local to the module shared object?
+    bool module_local : 1;
+
+    /// Is the class inheritable from python classes?
+    bool is_final : 1;
+
+    PYBIND11_NOINLINE void add_base(const std::type_info &base, void *(*caster)(void *) ) {
+        auto *base_info = detail::get_type_info(base, false);
+        if (!base_info) {
+            std::string tname(base.name());
+            detail::clean_type_id(tname);
+            pybind11_fail("generic_type: type \"" + std::string(name)
+                          + "\" referenced unknown base type \"" + tname + "\"");
+        }
+
+        if (default_holder != base_info->default_holder) {
+            std::string tname(base.name());
+            detail::clean_type_id(tname);
+            pybind11_fail("generic_type: type \"" + std::string(name) + "\" "
+                          + (default_holder ? "does not have" : "has")
+                          + " a non-default holder type while its base \"" + tname + "\" "
+                          + (base_info->default_holder ? "does not" : "does"));
+        }
+
+        bases.append((PyObject *) base_info->type);
+
+#if PY_VERSION_HEX < 0x030B0000
+        dynamic_attr |= base_info->type->tp_dictoffset != 0;
+#else
+        dynamic_attr |= (base_info->type->tp_flags & Py_TPFLAGS_MANAGED_DICT) != 0;
+#endif
+
+        if (caster) {
+            base_info->implicit_casts.emplace_back(type, caster);
+        }
+    }
+};
+
+inline function_call::function_call(const function_record &f, handle p) : func(f), parent(p) {
+    args.reserve(f.nargs);
+    args_convert.reserve(f.nargs);
+}
+
+/// Tag for a new-style `__init__` defined in `detail/init.h`
+struct is_new_style_constructor {};
+
+/**
+ * Partial template specializations to process custom attributes provided to
+ * cpp_function_ and class_. These are either used to initialize the respective
+ * fields in the type_record and function_record data structures or executed at
+ * runtime to deal with custom call policies (e.g. keep_alive).
+ */
+template <typename T, typename SFINAE = void>
+struct process_attribute;
+
+template <typename T>
+struct process_attribute_default {
+    /// Default implementation: do nothing
+    static void init(const T &, function_record *) {}
+    static void init(const T &, type_record *) {}
+    static void precall(function_call &) {}
+    static void postcall(function_call &, handle) {}
+};
+
+/// Process an attribute specifying the function's name
+template <>
+struct process_attribute<name> : process_attribute_default<name> {
+    static void init(const name &n, function_record *r) { r->name = const_cast<char *>(n.value); }
+};
+
+/// Process an attribute specifying the function's docstring
+template <>
+struct process_attribute<doc> : process_attribute_default<doc> {
+    static void init(const doc &n, function_record *r) { r->doc = const_cast<char *>(n.value); }
+};
+
+/// Process an attribute specifying the function's docstring (provided as a C-style string)
+template <>
+struct process_attribute<const char *> : process_attribute_default<const char *> {
+    static void init(const char *d, function_record *r) { r->doc = const_cast<char *>(d); }
+    static void init(const char *d, type_record *r) { r->doc = d; }
+};
+template <>
+struct process_attribute<char *> : process_attribute<const char *> {};
+
+/// Process an attribute indicating the function's return value policy
+template <>
+struct process_attribute<return_value_policy> : process_attribute_default<return_value_policy> {
+    static void init(const return_value_policy &p, function_record *r) { r->policy = p; }
+};
+
+/// Process an attribute which indicates that this is an overloaded function associated with a
+/// given sibling
+template <>
+struct process_attribute<sibling> : process_attribute_default<sibling> {
+    static void init(const sibling &s, function_record *r) { r->sibling = s.value; }
+};
+
+/// Process an attribute which indicates that this function is a method
+template <>
+struct process_attribute<is_method> : process_attribute_default<is_method> {
+    static void init(const is_method &s, function_record *r) {
+        r->is_method = true;
+        r->scope = s.class_;
+    }
+};
+
+/// Process an attribute which indicates that this function is a setter
+template <>
+struct process_attribute<is_setter> : process_attribute_default<is_setter> {
+    static void init(const is_setter &, function_record *r) { r->is_setter = true; }
+};
+
+/// Process an attribute which indicates the parent scope of a method
+template <>
+struct process_attribute<scope> : process_attribute_default<scope> {
+    static void init(const scope &s, function_record *r) { r->scope = s.value; }
+};
+
+/// Process an attribute which indicates that this function is an operator
+template <>
+struct process_attribute<is_operator> : process_attribute_default<is_operator> {
+    static void init(const is_operator &, function_record *r) { r->is_operator = true; }
+};
+
+template <>
+struct process_attribute<is_new_style_constructor>
+    : process_attribute_default<is_new_style_constructor> {
+    static void init(const is_new_style_constructor &, function_record *r) {
+        r->is_new_style_constructor = true;
+    }
+};
+
+inline void check_kw_only_arg(const arg &a, function_record *r) {
+    if (r->args.size() > r->nargs_pos && (!a.name || a.name[0] == '\0')) {
+        pybind11_fail("arg(): cannot specify an unnamed argument after a kw_only() annotation or "
+                      "args() argument");
+    }
+}
+
+inline void append_self_arg_if_needed(function_record *r) {
+    if (r->is_method && r->args.empty()) {
+        r->args.emplace_back("self", nullptr, handle(), /*convert=*/true, /*none=*/false);
+    }
+}
+
+/// Process a keyword argument attribute (*without* a default value)
+template <>
+struct process_attribute<arg> : process_attribute_default<arg> {
+    static void init(const arg &a, function_record *r) {
+        append_self_arg_if_needed(r);
+        r->args.emplace_back(a.name, nullptr, handle(), !a.flag_noconvert, a.flag_none);
+
+        check_kw_only_arg(a, r);
+    }
+};
+
+/// Process a keyword argument attribute (*with* a default value)
+template <>
+struct process_attribute<arg_v> : process_attribute_default<arg_v> {
+    static void init(const arg_v &a, function_record *r) {
+        if (r->is_method && r->args.empty()) {
+            r->args.emplace_back(
+                "self", /*descr=*/nullptr, /*parent=*/handle(), /*convert=*/true, /*none=*/false);
+        }
+
+        if (!a.value) {
+#if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
+            std::string descr("'");
+            if (a.name) {
+                descr += std::string(a.name) + ": ";
+            }
+            descr += a.type + "'";
+            if (r->is_method) {
+                if (r->name) {
+                    descr += " in method '" + (std::string) str(r->scope) + "."
+                             + (std::string) r->name + "'";
+                } else {
+                    descr += " in method of '" + (std::string) str(r->scope) + "'";
+                }
+            } else if (r->name) {
+                descr += " in function '" + (std::string) r->name + "'";
+            }
+            pybind11_fail("arg(): could not convert default argument " + descr
+                          + " into a Python object (type not registered yet?)");
+#else
+            pybind11_fail("arg(): could not convert default argument "
+                          "into a Python object (type not registered yet?). "
+                          "#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for "
+                          "more information.");
+#endif
+        }
+        r->args.emplace_back(a.name, a.descr, a.value.inc_ref(), !a.flag_noconvert, a.flag_none);
+
+        check_kw_only_arg(a, r);
+    }
+};
+
+/// Process a keyword-only-arguments-follow pseudo argument
+template <>
+struct process_attribute<kw_only> : process_attribute_default<kw_only> {
+    static void init(const kw_only &, function_record *r) {
+        append_self_arg_if_needed(r);
+        if (r->has_args && r->nargs_pos != static_cast<std::uint16_t>(r->args.size())) {
+            pybind11_fail("Mismatched args() and kw_only(): they must occur at the same relative "
+                          "argument location (or omit kw_only() entirely)");
+        }
+        r->nargs_pos = static_cast<std::uint16_t>(r->args.size());
+    }
+};
+
+/// Process a positional-only-argument marker
+template <>
+struct process_attribute<pos_only> : process_attribute_default<pos_only> {
+    static void init(const pos_only &, function_record *r) {
+        append_self_arg_if_needed(r);
+        r->nargs_pos_only = static_cast<std::uint16_t>(r->args.size());
+        if (r->nargs_pos_only > r->nargs_pos) {
+            pybind11_fail("pos_only(): cannot follow a py::args() argument");
+        }
+        // It also can't follow a kw_only, but a static_assert in pybind11.h checks that
+    }
+};
+
+/// Process a parent class attribute. Single inheritance only (class_ itself already guarantees
+/// that)
+template <typename T>
+struct process_attribute<T, enable_if_t<is_pyobject<T>::value>>
+    : process_attribute_default<handle> {
+    static void init(const handle &h, type_record *r) { r->bases.append(h); }
+};
+
+/// Process a parent class attribute (deprecated, does not support multiple inheritance)
+template <typename T>
+struct process_attribute<base<T>> : process_attribute_default<base<T>> {
+    static void init(const base<T> &, type_record *r) { r->add_base(typeid(T), nullptr); }
+};
+
+/// Process a multiple inheritance attribute
+template <>
+struct process_attribute<multiple_inheritance> : process_attribute_default<multiple_inheritance> {
+    static void init(const multiple_inheritance &, type_record *r) {
+        r->multiple_inheritance = true;
+    }
+};
+
+template <>
+struct process_attribute<dynamic_attr> : process_attribute_default<dynamic_attr> {
+    static void init(const dynamic_attr &, type_record *r) { r->dynamic_attr = true; }
+};
+
+template <>
+struct process_attribute<custom_type_setup> {
+    static void init(const custom_type_setup &value, type_record *r) {
+        r->custom_type_setup_callback = value.value;
+    }
+};
+
+template <>
+struct process_attribute<is_final> : process_attribute_default<is_final> {
+    static void init(const is_final &, type_record *r) { r->is_final = true; }
+};
+
+template <>
+struct process_attribute<buffer_protocol> : process_attribute_default<buffer_protocol> {
+    static void init(const buffer_protocol &, type_record *r) { r->buffer_protocol = true; }
+};
+
+template <>
+struct process_attribute<metaclass> : process_attribute_default<metaclass> {
+    static void init(const metaclass &m, type_record *r) { r->metaclass = m.value; }
+};
+
+template <>
+struct process_attribute<module_local> : process_attribute_default<module_local> {
+    static void init(const module_local &l, type_record *r) { r->module_local = l.value; }
+};
+
+/// Process a 'prepend' attribute, putting this at the beginning of the overload chain
+template <>
+struct process_attribute<prepend> : process_attribute_default<prepend> {
+    static void init(const prepend &, function_record *r) { r->prepend = true; }
+};
+
+/// Process an 'arithmetic' attribute for enums (does nothing here)
+template <>
+struct process_attribute<arithmetic> : process_attribute_default<arithmetic> {};
+
+template <typename... Ts>
+struct process_attribute<call_guard<Ts...>> : process_attribute_default<call_guard<Ts...>> {};
+
+/**
+ * Process a keep_alive call policy -- invokes keep_alive_impl during the
+ * pre-call handler if both Nurse, Patient != 0 and uses the post-call handler
+ * otherwise
+ */
+template <size_t Nurse, size_t Patient>
+struct process_attribute<keep_alive<Nurse, Patient>>
+    : public process_attribute_default<keep_alive<Nurse, Patient>> {
+    template <size_t N = Nurse, size_t P = Patient, enable_if_t<N != 0 && P != 0, int> = 0>
+    static void precall(function_call &call) {
+        keep_alive_impl(Nurse, Patient, call, handle());
+    }
+    template <size_t N = Nurse, size_t P = Patient, enable_if_t<N != 0 && P != 0, int> = 0>
+    static void postcall(function_call &, handle) {}
+    template <size_t N = Nurse, size_t P = Patient, enable_if_t<N == 0 || P == 0, int> = 0>
+    static void precall(function_call &) {}
+    template <size_t N = Nurse, size_t P = Patient, enable_if_t<N == 0 || P == 0, int> = 0>
+    static void postcall(function_call &call, handle ret) {
+        keep_alive_impl(Nurse, Patient, call, ret);
+    }
+};
+
+/// Recursively iterate over variadic template arguments
+template <typename... Args>
+struct process_attributes {
+    static void init(const Args &...args, function_record *r) {
+        PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(r);
+        PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(r);
+        using expander = int[];
+        (void) expander{
+            0, ((void) process_attribute<typename std::decay<Args>::type>::init(args, r), 0)...};
+    }
+    static void init(const Args &...args, type_record *r) {
+        PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(r);
+        PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(r);
+        using expander = int[];
+        (void) expander{0,
+                        (process_attribute<typename std::decay<Args>::type>::init(args, r), 0)...};
+    }
+    static void precall(function_call &call) {
+        PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(call);
+        using expander = int[];
+        (void) expander{0,
+                        (process_attribute<typename std::decay<Args>::type>::precall(call), 0)...};
+    }
+    static void postcall(function_call &call, handle fn_ret) {
+        PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(call, fn_ret);
+        PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(fn_ret);
+        using expander = int[];
+        (void) expander{
+            0, (process_attribute<typename std::decay<Args>::type>::postcall(call, fn_ret), 0)...};
+    }
+};
+
+template <typename T>
+using is_call_guard = is_instantiation<call_guard, T>;
+
+/// Extract the ``type`` from the first `call_guard` in `Extras...` (or `void_type` if none found)
+template <typename... Extra>
+using extract_guard_t = typename exactly_one_t<is_call_guard, call_guard<>, Extra...>::type;
+
+/// Check the number of named arguments at compile time
+template <typename... Extra,
+          size_t named = constexpr_sum(std::is_base_of<arg, Extra>::value...),
+          size_t self = constexpr_sum(std::is_same<is_method, Extra>::value...)>
+constexpr bool expected_num_args(size_t nargs, bool has_args, bool has_kwargs) {
+    PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(nargs, has_args, has_kwargs);
+    return named == 0 || (self + named + size_t(has_args) + size_t(has_kwargs)) == nargs;
+}
+
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
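
The records and process_attribute specializations above are what py::arg, py::call_guard, py::keep_alive, and the other annotations ultimately write into. A small binding sketch showing the annotations from the consumer's side; the module name `example`, `compute`, and `Container` are hypothetical names used only for illustration.

// Illustrative sketch, not part of the diff: attr.h annotations in use.
#include <pybind11/pybind11.h>
namespace py = pybind11;

int compute(int x, int y) { return x + y; }
struct Container { int value = 0; };

PYBIND11_MODULE(example, m) {
    // py::arg feeds process_attribute<arg>/<arg_v>; call_guard wraps the call
    // in a guard object (here, releasing the GIL while compute() runs).
    m.def("compute", &compute, py::arg("x"), py::arg("y") = 1,
          py::call_guard<py::gil_scoped_release>());

    // keep_alive<0, 1>: keep the container (argument index 1, i.e. self) alive
    // while the returned reference (index 0) is alive; this is dispatched via
    // process_attribute<keep_alive<Nurse, Patient>> at call time.
    py::class_<Container>(m, "Container")
        .def(py::init<>())
        .def("ref", [](Container &c) -> Container & { return c; },
             py::keep_alive<0, 1>(), py::return_value_policy::reference);
}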
venv/lib/python3.10/site-packages/torch/include/pybind11/buffer_info.h
ADDED
@@ -0,0 +1,208 @@
+/*
+    pybind11/buffer_info.h: Python buffer object interface
+
+    Copyright (c) 2016 Wenzel Jakob <[email protected]>
+
+    All rights reserved. Use of this source code is governed by a
+    BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "detail/common.h"
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+// Default, C-style strides
+inline std::vector<ssize_t> c_strides(const std::vector<ssize_t> &shape, ssize_t itemsize) {
+    auto ndim = shape.size();
+    std::vector<ssize_t> strides(ndim, itemsize);
+    if (ndim > 0) {
+        for (size_t i = ndim - 1; i > 0; --i) {
+            strides[i - 1] = strides[i] * shape[i];
+        }
+    }
+    return strides;
+}
+
+// F-style strides; default when constructing an array_t with `ExtraFlags & f_style`
+inline std::vector<ssize_t> f_strides(const std::vector<ssize_t> &shape, ssize_t itemsize) {
+    auto ndim = shape.size();
+    std::vector<ssize_t> strides(ndim, itemsize);
+    for (size_t i = 1; i < ndim; ++i) {
+        strides[i] = strides[i - 1] * shape[i - 1];
+    }
+    return strides;
+}
+
+template <typename T, typename SFINAE = void>
+struct compare_buffer_info;
+
+PYBIND11_NAMESPACE_END(detail)
+
+/// Information record describing a Python buffer object
+struct buffer_info {
+    void *ptr = nullptr;          // Pointer to the underlying storage
+    ssize_t itemsize = 0;         // Size of individual items in bytes
+    ssize_t size = 0;             // Total number of entries
+    std::string format;           // For homogeneous buffers, this should be set to
+                                  // format_descriptor<T>::format()
+    ssize_t ndim = 0;             // Number of dimensions
+    std::vector<ssize_t> shape;   // Shape of the tensor (1 entry per dimension)
+    std::vector<ssize_t> strides; // Number of bytes between adjacent entries
+                                  // (one entry per dimension)
+    bool readonly = false;        // flag to indicate if the underlying storage may be written to
+
+    buffer_info() = default;
+
+    buffer_info(void *ptr,
+                ssize_t itemsize,
+                const std::string &format,
+                ssize_t ndim,
+                detail::any_container<ssize_t> shape_in,
+                detail::any_container<ssize_t> strides_in,
+                bool readonly = false)
+        : ptr(ptr), itemsize(itemsize), size(1), format(format), ndim(ndim),
+          shape(std::move(shape_in)), strides(std::move(strides_in)), readonly(readonly) {
+        if (ndim != (ssize_t) shape.size() || ndim != (ssize_t) strides.size()) {
+            pybind11_fail("buffer_info: ndim doesn't match shape and/or strides length");
+        }
+        for (size_t i = 0; i < (size_t) ndim; ++i) {
+            size *= shape[i];
+        }
+    }
+
+    template <typename T>
+    buffer_info(T *ptr,
+                detail::any_container<ssize_t> shape_in,
+                detail::any_container<ssize_t> strides_in,
+                bool readonly = false)
+        : buffer_info(private_ctr_tag(),
+                      ptr,
+                      sizeof(T),
+                      format_descriptor<T>::format(),
+                      static_cast<ssize_t>(shape_in->size()),
+                      std::move(shape_in),
+                      std::move(strides_in),
+                      readonly) {}
+
+    buffer_info(void *ptr,
+                ssize_t itemsize,
+                const std::string &format,
+                ssize_t size,
+                bool readonly = false)
+        : buffer_info(ptr, itemsize, format, 1, {size}, {itemsize}, readonly) {}
+
+    template <typename T>
+    buffer_info(T *ptr, ssize_t size, bool readonly = false)
+        : buffer_info(ptr, sizeof(T), format_descriptor<T>::format(), size, readonly) {}
+
+    template <typename T>
+    buffer_info(const T *ptr, ssize_t size, bool readonly = true)
+        : buffer_info(
+            const_cast<T *>(ptr), sizeof(T), format_descriptor<T>::format(), size, readonly) {}
+
+    explicit buffer_info(Py_buffer *view, bool ownview = true)
+        : buffer_info(
+            view->buf,
+            view->itemsize,
+            view->format,
+            view->ndim,
+            {view->shape, view->shape + view->ndim},
+            /* Though buffer::request() requests PyBUF_STRIDES, ctypes objects
+             * ignore this flag and return a view with NULL strides.
+             * When strides are NULL, build them manually. */
+            view->strides
+                ? std::vector<ssize_t>(view->strides, view->strides + view->ndim)
+                : detail::c_strides({view->shape, view->shape + view->ndim}, view->itemsize),
+            (view->readonly != 0)) {
+        // NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer)
+        this->m_view = view;
+        // NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer)
+        this->ownview = ownview;
+    }
+
+    buffer_info(const buffer_info &) = delete;
+    buffer_info &operator=(const buffer_info &) = delete;
+
+    buffer_info(buffer_info &&other) noexcept { (*this) = std::move(other); }
+
+    buffer_info &operator=(buffer_info &&rhs) noexcept {
+        ptr = rhs.ptr;
+        itemsize = rhs.itemsize;
+        size = rhs.size;
+        format = std::move(rhs.format);
+        ndim = rhs.ndim;
+        shape = std::move(rhs.shape);
+        strides = std::move(rhs.strides);
+        std::swap(m_view, rhs.m_view);
+        std::swap(ownview, rhs.ownview);
+        readonly = rhs.readonly;
+        return *this;
+    }
+
+    ~buffer_info() {
+        if (m_view && ownview) {
+            PyBuffer_Release(m_view);
+            delete m_view;
+        }
+    }
+
+    Py_buffer *view() const { return m_view; }
+    Py_buffer *&view() { return m_view; }
+
+    /* True if the buffer item type is equivalent to `T`. */
+    // To define "equivalent" by example:
+    // `buffer_info::item_type_is_equivalent_to<int>(b)` and
+    // `buffer_info::item_type_is_equivalent_to<long>(b)` may both be true
+    // on some platforms, but `int` and `unsigned` will never be equivalent.
+    // For the ground truth, please inspect `detail::compare_buffer_info<>`.
+    template <typename T>
+    bool item_type_is_equivalent_to() const {
+        return detail::compare_buffer_info<T>::compare(*this);
+    }
+
+private:
+    struct private_ctr_tag {};
+
+    buffer_info(private_ctr_tag,
+                void *ptr,
+                ssize_t itemsize,
+                const std::string &format,
+                ssize_t ndim,
+                detail::any_container<ssize_t> &&shape_in,
+                detail::any_container<ssize_t> &&strides_in,
+                bool readonly)
+        : buffer_info(
+            ptr, itemsize, format, ndim, std::move(shape_in), std::move(strides_in), readonly) {}
+
+    Py_buffer *m_view = nullptr;
+    bool ownview = false;
+};
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+template <typename T, typename SFINAE>
+struct compare_buffer_info {
+    static bool compare(const buffer_info &b) {
+        // NOLINTNEXTLINE(bugprone-sizeof-expression) Needed for `PyObject *`
+        return b.format == format_descriptor<T>::format() && b.itemsize == (ssize_t) sizeof(T);
+    }
+};
+
+template <typename T>
+struct compare_buffer_info<T, detail::enable_if_t<std::is_integral<T>::value>> {
+    static bool compare(const buffer_info &b) {
+        return (size_t) b.itemsize == sizeof(T)
+               && (b.format == format_descriptor<T>::value
+                   || ((sizeof(T) == sizeof(long))
+                       && b.format == (std::is_unsigned<T>::value ? "L" : "l"))
+                   || ((sizeof(T) == sizeof(size_t))
+                       && b.format == (std::is_unsigned<T>::value ? "N" : "n")));
+    }
+};
+
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
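
c_strides walks the shape from the innermost dimension outwards, multiplying element counts into byte strides as it goes. A standalone re-derivation with a worked value makes the output concrete; the names below are local to the sketch, not part of pybind11.

// Illustrative sketch, not part of the diff: the C-order stride computation
// above, re-derived independently so the expected output is visible.
#include <cstdio>
#include <vector>

using ssize_vec = std::vector<long long>;

ssize_vec c_order_strides(const ssize_vec &shape, long long itemsize) {
  ssize_vec strides(shape.size(), itemsize); // innermost stride == itemsize
  for (size_t i = shape.size(); i > 1; --i)  // walk from last dim to first
    strides[i - 2] = strides[i - 1] * shape[i - 1];
  return strides;
}

int main() {
  // shape {2, 3, 4} with 8-byte items: C-order strides are {96, 32, 8}.
  for (auto s : c_order_strides({2, 3, 4}, 8))
    std::printf("%lld ", s);
  std::printf("\n");
  return 0;
}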
venv/lib/python3.10/site-packages/torch/include/pybind11/cast.h
ADDED
@@ -0,0 +1,1837 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/*
    pybind11/cast.h: Partial template specializations to cast between
    C++ and Python types

    Copyright (c) 2016 Wenzel Jakob <[email protected]>

    All rights reserved. Use of this source code is governed by a
    BSD-style license that can be found in the LICENSE file.
*/

#pragma once

#include "detail/common.h"
#include "detail/descr.h"
#include "detail/type_caster_base.h"
#include "detail/typeid.h"
#include "pytypes.h"

#include <array>
#include <cstring>
#include <functional>
#include <iosfwd>
#include <iterator>
#include <memory>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>

PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)

PYBIND11_WARNING_DISABLE_MSVC(4127)

PYBIND11_NAMESPACE_BEGIN(detail)

template <typename type, typename SFINAE = void>
class type_caster : public type_caster_base<type> {};
template <typename type>
using make_caster = type_caster<intrinsic_t<type>>;

// Shortcut for calling a caster's `cast_op_type` cast operator for casting a type_caster to a T
template <typename T>
typename make_caster<T>::template cast_op_type<T> cast_op(make_caster<T> &caster) {
    using result_t = typename make_caster<T>::template cast_op_type<T>; // See PR #4893
    return caster.operator result_t();
}
template <typename T>
typename make_caster<T>::template cast_op_type<typename std::add_rvalue_reference<T>::type>
cast_op(make_caster<T> &&caster) {
    using result_t = typename make_caster<T>::template cast_op_type<
        typename std::add_rvalue_reference<T>::type>; // See PR #4893
    return std::move(caster).operator result_t();
}

template <typename type>
class type_caster<std::reference_wrapper<type>> {
private:
    using caster_t = make_caster<type>;
    caster_t subcaster;
    using reference_t = type &;
    using subcaster_cast_op_type = typename caster_t::template cast_op_type<reference_t>;

    static_assert(
        std::is_same<typename std::remove_const<type>::type &, subcaster_cast_op_type>::value
            || std::is_same<reference_t, subcaster_cast_op_type>::value,
        "std::reference_wrapper<T> caster requires T to have a caster with an "
        "`operator T &()` or `operator const T &()`");

public:
    bool load(handle src, bool convert) { return subcaster.load(src, convert); }
    static constexpr auto name = caster_t::name;
    static handle
    cast(const std::reference_wrapper<type> &src, return_value_policy policy, handle parent) {
        // It is definitely wrong to take ownership of this pointer, so mask that rvp
        if (policy == return_value_policy::take_ownership
            || policy == return_value_policy::automatic) {
            policy = return_value_policy::automatic_reference;
        }
        return caster_t::cast(&src.get(), policy, parent);
    }
    template <typename T>
    using cast_op_type = std::reference_wrapper<type>;
    explicit operator std::reference_wrapper<type>() { return cast_op<type &>(subcaster); }
};

#define PYBIND11_TYPE_CASTER(type, py_name)                                                       \
protected:                                                                                        \
    type value;                                                                                   \
                                                                                                  \
public:                                                                                           \
    static constexpr auto name = py_name;                                                         \
    template <typename T_,                                                                        \
              ::pybind11::detail::enable_if_t<                                                    \
                  std::is_same<type, ::pybind11::detail::remove_cv_t<T_>>::value,                 \
                  int>                                                                            \
              = 0>                                                                                \
    static ::pybind11::handle cast(                                                               \
        T_ *src, ::pybind11::return_value_policy policy, ::pybind11::handle parent) {             \
        if (!src)                                                                                 \
            return ::pybind11::none().release();                                                  \
        if (policy == ::pybind11::return_value_policy::take_ownership) {                          \
            auto h = cast(std::move(*src), policy, parent);                                       \
            delete src;                                                                           \
            return h;                                                                             \
        }                                                                                         \
        return cast(*src, policy, parent);                                                        \
    }                                                                                             \
    operator type *() { return &value; }               /* NOLINT(bugprone-macro-parentheses) */  \
    operator type &() { return value; }                /* NOLINT(bugprone-macro-parentheses) */  \
    operator type &&() && { return std::move(value); } /* NOLINT(bugprone-macro-parentheses) */  \
    template <typename T_>                                                                        \
    using cast_op_type = ::pybind11::detail::movable_cast_op_type<T_>

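The macro above injects the `value` member, the `name` descriptor, and the pointer/reference conversion boilerplate that a simple caster needs. A minimal sketch of a user-level caster built on it, following the pattern from the pybind11 documentation; the struct `inty`, the module name, and the conversion rules are illustrative, not part of cast.h:

// --- usage sketch (not part of cast.h) ---
#include <pybind11/pybind11.h>

namespace py = pybind11;

struct inty {
    long long v;
};

namespace pybind11 {
namespace detail {
template <>
struct type_caster<inty> {
    // Injects `inty value;` plus the cast/operator boilerplate defined above.
    PYBIND11_TYPE_CASTER(inty, const_name("inty"));

    // Python -> C++: accept anything PyNumber_Long can convert.
    bool load(handle src, bool /* convert */) {
        PyObject *tmp = PyNumber_Long(src.ptr());
        if (!tmp) {
            PyErr_Clear();
            return false;
        }
        value.v = PyLong_AsLongLong(tmp);
        Py_DECREF(tmp);
        // Fail if the conversion itself errored (e.g. overflow).
        return !(value.v == -1 && PyErr_Occurred());
    }

    // C++ -> Python: wrap the payload in a Python int.
    static handle cast(inty src, return_value_policy /* policy */, handle /* parent */) {
        return PyLong_FromLongLong(src.v);
    }
};
} // namespace detail
} // namespace pybind11

PYBIND11_MODULE(inty_demo, m) {
    m.def("add", [](inty a, inty b) { return inty{a.v + b.v}; });
}
// --- end of sketch; cast.h continues below ---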
template <typename CharT>
using is_std_char_type = any_of<std::is_same<CharT, char>, /* std::string */
#if defined(PYBIND11_HAS_U8STRING)
                                std::is_same<CharT, char8_t>, /* std::u8string */
#endif
                                std::is_same<CharT, char16_t>, /* std::u16string */
                                std::is_same<CharT, char32_t>, /* std::u32string */
                                std::is_same<CharT, wchar_t> /* std::wstring */
                                >;

template <typename T>
struct type_caster<T, enable_if_t<std::is_arithmetic<T>::value && !is_std_char_type<T>::value>> {
    using _py_type_0 = conditional_t<sizeof(T) <= sizeof(long), long, long long>;
    using _py_type_1 = conditional_t<std::is_signed<T>::value,
                                     _py_type_0,
                                     typename std::make_unsigned<_py_type_0>::type>;
    using py_type = conditional_t<std::is_floating_point<T>::value, double, _py_type_1>;

public:
    bool load(handle src, bool convert) {
        py_type py_value;

        if (!src) {
            return false;
        }

#if !defined(PYPY_VERSION)
        auto index_check = [](PyObject *o) { return PyIndex_Check(o); };
#else
        // In PyPy 7.3.3, `PyIndex_Check` is implemented by calling `__index__`,
        // while CPython only considers the existence of `nb_index`/`__index__`.
        auto index_check = [](PyObject *o) { return hasattr(o, "__index__"); };
#endif

        if (std::is_floating_point<T>::value) {
            if (convert || PyFloat_Check(src.ptr())) {
                py_value = (py_type) PyFloat_AsDouble(src.ptr());
            } else {
                return false;
            }
        } else if (PyFloat_Check(src.ptr())
                   || (!convert && !PYBIND11_LONG_CHECK(src.ptr()) && !index_check(src.ptr()))) {
            return false;
        } else {
            handle src_or_index = src;
            // PyPy: 7.3.7's 3.8 does not implement PyLong_*'s __index__ calls.
#if PY_VERSION_HEX < 0x03080000 || defined(PYPY_VERSION)
            object index;
            if (!PYBIND11_LONG_CHECK(src.ptr())) { // So: index_check(src.ptr())
                index = reinterpret_steal<object>(PyNumber_Index(src.ptr()));
                if (!index) {
                    PyErr_Clear();
                    if (!convert)
                        return false;
                } else {
                    src_or_index = index;
                }
            }
#endif
            if (std::is_unsigned<py_type>::value) {
                py_value = as_unsigned<py_type>(src_or_index.ptr());
            } else { // signed integer:
                py_value = sizeof(T) <= sizeof(long)
                               ? (py_type) PyLong_AsLong(src_or_index.ptr())
                               : (py_type) PYBIND11_LONG_AS_LONGLONG(src_or_index.ptr());
            }
        }

        // Python API reported an error
        bool py_err = py_value == (py_type) -1 && PyErr_Occurred();

        // Check to see if the conversion is valid (integers should match exactly)
        // Signed/unsigned checks happen elsewhere
        if (py_err
            || (std::is_integral<T>::value && sizeof(py_type) != sizeof(T)
                && py_value != (py_type) (T) py_value)) {
            PyErr_Clear();
            if (py_err && convert && (PyNumber_Check(src.ptr()) != 0)) {
                auto tmp = reinterpret_steal<object>(std::is_floating_point<T>::value
                                                         ? PyNumber_Float(src.ptr())
                                                         : PyNumber_Long(src.ptr()));
                PyErr_Clear();
                return load(tmp, false);
            }
            return false;
        }

        value = (T) py_value;
        return true;
    }

    template <typename U = T>
    static typename std::enable_if<std::is_floating_point<U>::value, handle>::type
    cast(U src, return_value_policy /* policy */, handle /* parent */) {
        return PyFloat_FromDouble((double) src);
    }

    template <typename U = T>
    static typename std::enable_if<!std::is_floating_point<U>::value && std::is_signed<U>::value
                                       && (sizeof(U) <= sizeof(long)),
                                   handle>::type
    cast(U src, return_value_policy /* policy */, handle /* parent */) {
        return PYBIND11_LONG_FROM_SIGNED((long) src);
    }

    template <typename U = T>
    static typename std::enable_if<!std::is_floating_point<U>::value && std::is_unsigned<U>::value
                                       && (sizeof(U) <= sizeof(unsigned long)),
                                   handle>::type
    cast(U src, return_value_policy /* policy */, handle /* parent */) {
        return PYBIND11_LONG_FROM_UNSIGNED((unsigned long) src);
    }

    template <typename U = T>
    static typename std::enable_if<!std::is_floating_point<U>::value && std::is_signed<U>::value
                                       && (sizeof(U) > sizeof(long)),
                                   handle>::type
    cast(U src, return_value_policy /* policy */, handle /* parent */) {
        return PyLong_FromLongLong((long long) src);
    }

    template <typename U = T>
    static typename std::enable_if<!std::is_floating_point<U>::value && std::is_unsigned<U>::value
                                       && (sizeof(U) > sizeof(unsigned long)),
                                   handle>::type
    cast(U src, return_value_policy /* policy */, handle /* parent */) {
        return PyLong_FromUnsignedLongLong((unsigned long long) src);
    }

    PYBIND11_TYPE_CASTER(T, const_name<std::is_integral<T>::value>("int", "float"));
};

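A short sketch of how the arithmetic caster above behaves when driven from C++ with an embedded interpreter (the variable names are illustrative; requires pybind11/embed.h):

// --- usage sketch (not part of cast.h) ---
#include <pybind11/embed.h>
#include <cassert>

namespace py = pybind11;

int main() {
    py::scoped_interpreter guard{}; // start an embedded Python
    // Python int -> C++ double: allowed, load() takes the floating-point branch.
    double d = py::int_(3).cast<double>();
    assert(d == 3.0);
    // Python float -> C++ int: refused even in convert mode; load() returns
    // false for PyFloat when T is integral, so cast<int>() throws cast_error.
    bool threw = false;
    try {
        py::float_(3.14).cast<int>();
    } catch (const py::cast_error &) {
        threw = true;
    }
    assert(threw);
    return 0;
}
// --- end of sketch; cast.h continues below ---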
template <typename T>
struct void_caster {
public:
    bool load(handle src, bool) {
        if (src && src.is_none()) {
            return true;
        }
        return false;
    }
    static handle cast(T, return_value_policy /* policy */, handle /* parent */) {
        return none().release();
    }
    PYBIND11_TYPE_CASTER(T, const_name("None"));
};

template <>
class type_caster<void_type> : public void_caster<void_type> {};

template <>
class type_caster<void> : public type_caster<void_type> {
public:
    using type_caster<void_type>::cast;

    bool load(handle h, bool) {
        if (!h) {
            return false;
        }
        if (h.is_none()) {
            value = nullptr;
            return true;
        }

        /* Check if this is a capsule */
        if (isinstance<capsule>(h)) {
            value = reinterpret_borrow<capsule>(h);
            return true;
        }

        /* Check if this is a C++ type */
        const auto &bases = all_type_info((PyTypeObject *) type::handle_of(h).ptr());
        if (bases.size() == 1) { // Only allowing loading from a single-value type
            value = values_and_holders(reinterpret_cast<instance *>(h.ptr())).begin()->value_ptr();
            return true;
        }

        /* Fail */
        return false;
    }

    static handle cast(const void *ptr, return_value_policy /* policy */, handle /* parent */) {
        if (ptr) {
            return capsule(ptr).release();
        }
        return none().release();
    }

    template <typename T>
    using cast_op_type = void *&;
    explicit operator void *&() { return value; }
    static constexpr auto name = const_name("capsule");

private:
    void *value = nullptr;
};

template <>
class type_caster<std::nullptr_t> : public void_caster<std::nullptr_t> {};

template <>
class type_caster<bool> {
public:
    bool load(handle src, bool convert) {
        if (!src) {
            return false;
        }
        if (src.ptr() == Py_True) {
            value = true;
            return true;
        }
        if (src.ptr() == Py_False) {
            value = false;
            return true;
        }
        if (convert || is_numpy_bool(src)) {
            // (allow non-implicit conversion for numpy booleans)

            Py_ssize_t res = -1;
            if (src.is_none()) {
                res = 0; // None is implicitly converted to False
            }
#if defined(PYPY_VERSION)
            // On PyPy, check that "__bool__" attr exists
            else if (hasattr(src, PYBIND11_BOOL_ATTR)) {
                res = PyObject_IsTrue(src.ptr());
            }
#else
            // Alternate approach for CPython: this does the same as the above, but optimized
            // using the CPython API so as to avoid an unneeded attribute lookup.
            else if (auto *tp_as_number = src.ptr()->ob_type->tp_as_number) {
                if (PYBIND11_NB_BOOL(tp_as_number)) {
                    res = (*PYBIND11_NB_BOOL(tp_as_number))(src.ptr());
                }
            }
#endif
            if (res == 0 || res == 1) {
                value = (res != 0);
                return true;
            }
            PyErr_Clear();
        }
        return false;
    }
    static handle cast(bool src, return_value_policy /* policy */, handle /* parent */) {
        return handle(src ? Py_True : Py_False).inc_ref();
    }
    PYBIND11_TYPE_CASTER(bool, const_name("bool"));

private:
    // Test if an object is a NumPy boolean (without fetching the type); compares the
    // type name with strcmp, since NumPy 1.x had an additional trailing underscore.
    static inline bool is_numpy_bool(handle object) {
        const char *type_name = Py_TYPE(object.ptr())->tp_name;
        // Name changed to `numpy.bool` in NumPy 2, `numpy.bool_` is needed for 1.x support
        return std::strcmp("numpy.bool", type_name) == 0
               || std::strcmp("numpy.bool_", type_name) == 0;
    }
};

// Helper class for UTF-{8,16,32} C++ stl strings:
template <typename StringType, bool IsView = false>
struct string_caster {
    using CharT = typename StringType::value_type;

    // Simplify life by being able to assume standard char sizes (the standard only guarantees
    // minimums, but Python requires exact sizes)
    static_assert(!std::is_same<CharT, char>::value || sizeof(CharT) == 1,
                  "Unsupported char size != 1");
#if defined(PYBIND11_HAS_U8STRING)
    static_assert(!std::is_same<CharT, char8_t>::value || sizeof(CharT) == 1,
                  "Unsupported char8_t size != 1");
#endif
    static_assert(!std::is_same<CharT, char16_t>::value || sizeof(CharT) == 2,
                  "Unsupported char16_t size != 2");
    static_assert(!std::is_same<CharT, char32_t>::value || sizeof(CharT) == 4,
                  "Unsupported char32_t size != 4");
    // wchar_t can be either 16 bits (Windows) or 32 (everywhere else)
    static_assert(!std::is_same<CharT, wchar_t>::value || sizeof(CharT) == 2 || sizeof(CharT) == 4,
                  "Unsupported wchar_t size != 2/4");
    static constexpr size_t UTF_N = 8 * sizeof(CharT);

    bool load(handle src, bool) {
        handle load_src = src;
        if (!src) {
            return false;
        }
        if (!PyUnicode_Check(load_src.ptr())) {
            return load_raw(load_src);
        }

        // For UTF-8 we avoid the need for a temporary `bytes` object by using
        // `PyUnicode_AsUTF8AndSize`.
        if (UTF_N == 8) {
            Py_ssize_t size = -1;
            const auto *buffer
                = reinterpret_cast<const CharT *>(PyUnicode_AsUTF8AndSize(load_src.ptr(), &size));
            if (!buffer) {
                PyErr_Clear();
                return false;
            }
            value = StringType(buffer, static_cast<size_t>(size));
            return true;
        }

        auto utfNbytes
            = reinterpret_steal<object>(PyUnicode_AsEncodedString(load_src.ptr(),
                                                                  UTF_N == 8    ? "utf-8"
                                                                  : UTF_N == 16 ? "utf-16"
                                                                                : "utf-32",
                                                                  nullptr));
        if (!utfNbytes) {
            PyErr_Clear();
            return false;
        }

        const auto *buffer
            = reinterpret_cast<const CharT *>(PYBIND11_BYTES_AS_STRING(utfNbytes.ptr()));
        size_t length = (size_t) PYBIND11_BYTES_SIZE(utfNbytes.ptr()) / sizeof(CharT);
        // Skip BOM for UTF-16/32
        if (UTF_N > 8) {
            buffer++;
            length--;
        }
        value = StringType(buffer, length);

        // If we're loading a string_view we need to keep the encoded Python object alive:
        if (IsView) {
            loader_life_support::add_patient(utfNbytes);
        }

        return true;
    }

    static handle
    cast(const StringType &src, return_value_policy /* policy */, handle /* parent */) {
        const char *buffer = reinterpret_cast<const char *>(src.data());
        auto nbytes = ssize_t(src.size() * sizeof(CharT));
        handle s = decode_utfN(buffer, nbytes);
        if (!s) {
            throw error_already_set();
        }
        return s;
    }

    PYBIND11_TYPE_CASTER(StringType, const_name(PYBIND11_STRING_NAME));

private:
    static handle decode_utfN(const char *buffer, ssize_t nbytes) {
#if !defined(PYPY_VERSION)
        return UTF_N == 8    ? PyUnicode_DecodeUTF8(buffer, nbytes, nullptr)
               : UTF_N == 16 ? PyUnicode_DecodeUTF16(buffer, nbytes, nullptr, nullptr)
                             : PyUnicode_DecodeUTF32(buffer, nbytes, nullptr, nullptr);
#else
        // PyPy segfaults when on PyUnicode_DecodeUTF16 (and possibly on PyUnicode_DecodeUTF32 as
        // well), so bypass the whole thing by just passing the encoding as a string value, which
        // works properly:
        return PyUnicode_Decode(buffer,
                                nbytes,
                                UTF_N == 8    ? "utf-8"
                                : UTF_N == 16 ? "utf-16"
                                              : "utf-32",
                                nullptr);
#endif
    }

    // When loading into a std::string or char*, accept a bytes/bytearray object as-is (i.e.
    // without any encoding/decoding attempt). For other C++ char sizes this is a no-op: the
    // caster for those, which supports loading a unicode from a str, doesn't take this path.
    template <typename C = CharT>
    bool load_raw(enable_if_t<std::is_same<C, char>::value, handle> src) {
        if (PYBIND11_BYTES_CHECK(src.ptr())) {
            // We were passed raw bytes; accept it into a std::string or char*
            // without any encoding attempt.
            const char *bytes = PYBIND11_BYTES_AS_STRING(src.ptr());
            if (!bytes) {
                pybind11_fail("Unexpected PYBIND11_BYTES_AS_STRING() failure.");
            }
            value = StringType(bytes, (size_t) PYBIND11_BYTES_SIZE(src.ptr()));
            return true;
        }
        if (PyByteArray_Check(src.ptr())) {
            // We were passed a bytearray; accept it into a std::string or char*
            // without any encoding attempt.
            const char *bytearray = PyByteArray_AsString(src.ptr());
            if (!bytearray) {
                pybind11_fail("Unexpected PyByteArray_AsString() failure.");
            }
            value = StringType(bytearray, (size_t) PyByteArray_Size(src.ptr()));
            return true;
        }

        return false;
    }

    template <typename C = CharT>
    bool load_raw(enable_if_t<!std::is_same<C, char>::value, handle>) {
        return false;
    }
};

template <typename CharT, class Traits, class Allocator>
struct type_caster<std::basic_string<CharT, Traits, Allocator>,
                   enable_if_t<is_std_char_type<CharT>::value>>
    : string_caster<std::basic_string<CharT, Traits, Allocator>> {};

#ifdef PYBIND11_HAS_STRING_VIEW
template <typename CharT, class Traits>
struct type_caster<std::basic_string_view<CharT, Traits>,
                   enable_if_t<is_std_char_type<CharT>::value>>
    : string_caster<std::basic_string_view<CharT, Traits>, true> {};
#endif

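An illustrative sketch of the string casters above (assumes a UTF-8 source file and an embedded interpreter as in the previous sketch):

// --- usage sketch (not part of cast.h) ---
#include <pybind11/embed.h>
#include <cassert>
#include <string>

namespace py = pybind11;

int main() {
    py::scoped_interpreter guard{};
    // str -> std::string: decoded via PyUnicode_AsUTF8AndSize (UTF_N == 8 path).
    std::string cpp = py::str("héllo").cast<std::string>();
    assert(!cpp.empty());
    // bytes -> std::string: taken verbatim through load_raw(), no decoding.
    py::bytes b("raw\0data", 8);
    assert(b.cast<std::string>().size() == 8);
    // str -> std::wstring: re-encoded as UTF-16/32 first, BOM skipped.
    std::wstring wide = py::str("héllo").cast<std::wstring>();
    assert(!wide.empty());
    return 0;
}
// --- end of sketch; cast.h continues below ---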
// Type caster for C-style strings. We basically use a std::string type caster, but also add the
// ability to use None as a nullptr char* (which the string caster doesn't allow).
template <typename CharT>
struct type_caster<CharT, enable_if_t<is_std_char_type<CharT>::value>> {
    using StringType = std::basic_string<CharT>;
    using StringCaster = make_caster<StringType>;
    StringCaster str_caster;
    bool none = false;
    CharT one_char = 0;

public:
    bool load(handle src, bool convert) {
        if (!src) {
            return false;
        }
        if (src.is_none()) {
            // Defer accepting None to other overloads (if we aren't in convert mode):
            if (!convert) {
                return false;
            }
            none = true;
            return true;
        }
        return str_caster.load(src, convert);
    }

    static handle cast(const CharT *src, return_value_policy policy, handle parent) {
        if (src == nullptr) {
            return pybind11::none().release();
        }
        return StringCaster::cast(StringType(src), policy, parent);
    }

    static handle cast(CharT src, return_value_policy policy, handle parent) {
        if (std::is_same<char, CharT>::value) {
            handle s = PyUnicode_DecodeLatin1((const char *) &src, 1, nullptr);
            if (!s) {
                throw error_already_set();
            }
            return s;
        }
        return StringCaster::cast(StringType(1, src), policy, parent);
    }

    explicit operator CharT *() {
        return none ? nullptr : const_cast<CharT *>(static_cast<StringType &>(str_caster).c_str());
    }
    explicit operator CharT &() {
        if (none) {
            throw value_error("Cannot convert None to a character");
        }

        auto &value = static_cast<StringType &>(str_caster);
        size_t str_len = value.size();
        if (str_len == 0) {
            throw value_error("Cannot convert empty string to a character");
        }

        // If we're in UTF-8 mode, we have two possible failures: one for a unicode character that
        // is too high, and one for multiple unicode characters (caught later), so we need to
        // figure out how long the first encoded character is in bytes to distinguish between these
        // two errors. We also want to allow unicode characters U+0080 through U+00FF, as
        // those can fit into a single char value.
        if (StringCaster::UTF_N == 8 && str_len > 1 && str_len <= 4) {
            auto v0 = static_cast<unsigned char>(value[0]);
            // low bits only: 0-127
            // 0b110xxxxx - start of 2-byte sequence
            // 0b1110xxxx - start of 3-byte sequence
            // 0b11110xxx - start of 4-byte sequence
            size_t char0_bytes = (v0 & 0x80) == 0      ? 1
                                 : (v0 & 0xE0) == 0xC0 ? 2
                                 : (v0 & 0xF0) == 0xE0 ? 3
                                                       : 4;

            if (char0_bytes == str_len) {
                // If we have a 128-255 value, we can decode it into a single char:
                if (char0_bytes == 2 && (v0 & 0xFC) == 0xC0) { // 0x110000xx 0x10xxxxxx
                    one_char = static_cast<CharT>(((v0 & 3) << 6)
                                                  + (static_cast<unsigned char>(value[1]) & 0x3F));
                    return one_char;
                }
                // Otherwise we have a single character, but it's > U+00FF
                throw value_error("Character code point not in range(0x100)");
            }
        }

        // UTF-16 is much easier: we can only have a surrogate pair for values above U+FFFF, thus a
        // surrogate pair with total length 2 instantly indicates a range error (but not a "your
        // string was too long" error).
        else if (StringCaster::UTF_N == 16 && str_len == 2) {
            one_char = static_cast<CharT>(value[0]);
            if (one_char >= 0xD800 && one_char < 0xE000) {
                throw value_error("Character code point not in range(0x10000)");
            }
        }

        if (str_len != 1) {
            throw value_error("Expected a character, but multi-character string found");
        }

        one_char = value[0];
        return one_char;
    }

    static constexpr auto name = const_name(PYBIND11_STRING_NAME);
    template <typename _T>
    using cast_op_type = pybind11::detail::cast_op_type<_T>;
};

// Base implementation for std::tuple and std::pair
template <template <typename...> class Tuple, typename... Ts>
class tuple_caster {
    using type = Tuple<Ts...>;
    static constexpr auto size = sizeof...(Ts);
    using indices = make_index_sequence<size>;

public:
    bool load(handle src, bool convert) {
        if (!isinstance<sequence>(src)) {
            return false;
        }
        const auto seq = reinterpret_borrow<sequence>(src);
        if (seq.size() != size) {
            return false;
        }
        return load_impl(seq, convert, indices{});
    }

    template <typename T>
    static handle cast(T &&src, return_value_policy policy, handle parent) {
        return cast_impl(std::forward<T>(src), policy, parent, indices{});
    }

    // copied from the PYBIND11_TYPE_CASTER macro
    template <typename T>
    static handle cast(T *src, return_value_policy policy, handle parent) {
        if (!src) {
            return none().release();
        }
        if (policy == return_value_policy::take_ownership) {
            auto h = cast(std::move(*src), policy, parent);
            delete src;
            return h;
        }
        return cast(*src, policy, parent);
    }

    static constexpr auto name = const_name("tuple[")
                                 + ::pybind11::detail::concat(make_caster<Ts>::name...)
                                 + const_name("]");

    template <typename T>
    using cast_op_type = type;

    explicit operator type() & { return implicit_cast(indices{}); }
    explicit operator type() && { return std::move(*this).implicit_cast(indices{}); }

protected:
    template <size_t... Is>
    type implicit_cast(index_sequence<Is...>) & {
        return type(cast_op<Ts>(std::get<Is>(subcasters))...);
    }
    template <size_t... Is>
    type implicit_cast(index_sequence<Is...>) && {
        return type(cast_op<Ts>(std::move(std::get<Is>(subcasters)))...);
    }

    static constexpr bool load_impl(const sequence &, bool, index_sequence<>) { return true; }

    template <size_t... Is>
    bool load_impl(const sequence &seq, bool convert, index_sequence<Is...>) {
#ifdef __cpp_fold_expressions
        if ((... || !std::get<Is>(subcasters).load(seq[Is], convert))) {
            return false;
        }
#else
        for (bool r : {std::get<Is>(subcasters).load(seq[Is], convert)...}) {
            if (!r) {
                return false;
            }
        }
#endif
        return true;
    }

    /* Implementation: Convert a C++ tuple into a Python tuple */
    template <typename T, size_t... Is>
    static handle
    cast_impl(T &&src, return_value_policy policy, handle parent, index_sequence<Is...>) {
        PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(src, policy, parent);
        PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(policy, parent);
        std::array<object, size> entries{{reinterpret_steal<object>(
            make_caster<Ts>::cast(std::get<Is>(std::forward<T>(src)), policy, parent))...}};
        for (const auto &entry : entries) {
            if (!entry) {
                return handle();
            }
        }
        tuple result(size);
        int counter = 0;
        for (auto &entry : entries) {
            PyTuple_SET_ITEM(result.ptr(), counter++, entry.release().ptr());
        }
        return result.release();
    }

    Tuple<make_caster<Ts>...> subcasters;
};

template <typename T1, typename T2>
class type_caster<std::pair<T1, T2>> : public tuple_caster<std::pair, T1, T2> {};

template <typename... Ts>
class type_caster<std::tuple<Ts...>> : public tuple_caster<std::tuple, Ts...> {};

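The pair/tuple casters above convert element-wise in both directions; a brief sketch (names are illustrative):

// --- usage sketch (not part of cast.h) ---
#include <pybind11/embed.h>
#include <cassert>
#include <string>
#include <tuple>

namespace py = pybind11;

int main() {
    py::scoped_interpreter guard{};
    // C++ -> Python: each element goes through its own make_caster<T>.
    py::object t = py::cast(std::make_tuple(1, 2.5, std::string("x")));
    assert(py::isinstance<py::tuple>(t));
    // Python -> C++: any sequence of the right length loads back.
    auto back = t.cast<std::tuple<int, double, std::string>>();
    assert(std::get<0>(back) == 1 && std::get<2>(back) == "x");
    return 0;
}
// --- end of sketch; cast.h continues below ---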
/// Helper class which abstracts away certain actions. Users can provide specializations for
/// custom holders, but it's only necessary if the type has a non-standard interface.
template <typename T>
struct holder_helper {
    static auto get(const T &p) -> decltype(p.get()) { return p.get(); }
};

/// Type caster for holder types like std::shared_ptr, etc.
/// The SFINAE hook is provided to help work around the current lack of support
/// for smart-pointer interoperability. Please consider it an implementation
/// detail that may change in the future, as formal support for smart-pointer
/// interoperability is added into pybind11.
template <typename type, typename holder_type, typename SFINAE = void>
struct copyable_holder_caster : public type_caster_base<type> {
public:
    using base = type_caster_base<type>;
    static_assert(std::is_base_of<base, type_caster<type>>::value,
                  "Holder classes are only supported for custom types");
    using base::base;
    using base::cast;
    using base::typeinfo;
    using base::value;

    bool load(handle src, bool convert) {
        return base::template load_impl<copyable_holder_caster<type, holder_type>>(src, convert);
    }

    explicit operator type *() { return this->value; }
    // static_cast works around compiler error with MSVC 17 and CUDA 10.2
    // see issue #2180
    explicit operator type &() { return *(static_cast<type *>(this->value)); }
    explicit operator holder_type *() { return std::addressof(holder); }
    explicit operator holder_type &() { return holder; }

    static handle cast(const holder_type &src, return_value_policy, handle) {
        const auto *ptr = holder_helper<holder_type>::get(src);
        return type_caster_base<type>::cast_holder(ptr, &src);
    }

protected:
    friend class type_caster_generic;
    void check_holder_compat() {
        if (typeinfo->default_holder) {
            throw cast_error("Unable to load a custom holder type from a default-holder instance");
        }
    }

    bool load_value(value_and_holder &&v_h) {
        if (v_h.holder_constructed()) {
            value = v_h.value_ptr();
            holder = v_h.template holder<holder_type>();
            return true;
        }
        throw cast_error("Unable to cast from non-held to held instance (T& to Holder<T>) "
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
                         "(#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for "
                         "type information)");
#else
                         "of type '"
                         + type_id<holder_type>() + "''");
#endif
    }

    template <typename T = holder_type,
              detail::enable_if_t<!std::is_constructible<T, const T &, type *>::value, int> = 0>
    bool try_implicit_casts(handle, bool) {
        return false;
    }

    template <typename T = holder_type,
              detail::enable_if_t<std::is_constructible<T, const T &, type *>::value, int> = 0>
    bool try_implicit_casts(handle src, bool convert) {
        for (auto &cast : typeinfo->implicit_casts) {
            copyable_holder_caster sub_caster(*cast.first);
            if (sub_caster.load(src, convert)) {
                value = cast.second(sub_caster.value);
                holder = holder_type(sub_caster.holder, (type *) value);
                return true;
            }
        }
        return false;
    }

    static bool try_direct_conversions(handle) { return false; }

    holder_type holder;
};

/// Specialize for the common std::shared_ptr, so users don't need to declare it themselves.
template <typename T>
class type_caster<std::shared_ptr<T>> : public copyable_holder_caster<T, std::shared_ptr<T>> {};

/// Type caster for holder types like std::unique_ptr.
/// Please consider the SFINAE hook an implementation detail, as explained
/// in the comment for the copyable_holder_caster.
template <typename type, typename holder_type, typename SFINAE = void>
struct move_only_holder_caster {
    static_assert(std::is_base_of<type_caster_base<type>, type_caster<type>>::value,
                  "Holder classes are only supported for custom types");

    static handle cast(holder_type &&src, return_value_policy, handle) {
        auto *ptr = holder_helper<holder_type>::get(src);
        return type_caster_base<type>::cast_holder(ptr, std::addressof(src));
    }
    static constexpr auto name = type_caster_base<type>::name;
};

template <typename type, typename deleter>
class type_caster<std::unique_ptr<type, deleter>>
    : public move_only_holder_caster<type, std::unique_ptr<type, deleter>> {};

template <typename type, typename holder_type>
using type_caster_holder = conditional_t<is_copy_constructible<holder_type>::value,
                                         copyable_holder_caster<type, holder_type>,
                                         move_only_holder_caster<type, holder_type>>;

template <typename T, bool Value = false>
struct always_construct_holder {
    static constexpr bool value = Value;
};

/// Create a specialization for custom holder types (silently ignores std::shared_ptr)
#define PYBIND11_DECLARE_HOLDER_TYPE(type, holder_type, ...)                                      \
    PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)                                                  \
    namespace detail {                                                                            \
    template <typename type>                                                                      \
    struct always_construct_holder<holder_type> : always_construct_holder<void, ##__VA_ARGS__> {  \
    };                                                                                            \
    template <typename type>                                                                      \
    class type_caster<holder_type, enable_if_t<!is_shared_ptr<holder_type>::value>>              \
        : public type_caster_holder<type, holder_type> {};                                        \
    }                                                                                             \
    PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)

// PYBIND11_DECLARE_HOLDER_TYPE holder types:
template <typename base, typename holder>
struct is_holder_type
    : std::is_base_of<detail::type_caster_holder<base, holder>, detail::type_caster<holder>> {};
// Specialization for always-supported unique_ptr holders:
template <typename base, typename deleter>
struct is_holder_type<base, std::unique_ptr<base, deleter>> : std::true_type {};

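Sketch of how the holder casters above surface in user code: std::shared_ptr works via copyable_holder_caster as soon as it is named as the class holder, while a custom smart pointer would first be registered with PYBIND11_DECLARE_HOLDER_TYPE. The Widget class and module name are illustrative:

// --- usage sketch (not part of cast.h) ---
#include <pybind11/pybind11.h>
#include <memory>

namespace py = pybind11;

struct Widget {
    int id = 0;
};

PYBIND11_MODULE(holders_demo, m) {
    // Naming std::shared_ptr<Widget> as the holder routes loads/casts of
    // shared_ptr<Widget> through copyable_holder_caster.
    py::class_<Widget, std::shared_ptr<Widget>>(m, "Widget")
        .def(py::init<>())
        .def_readwrite("id", &Widget::id);

    // Returning a shared_ptr keeps C++ and Python ownership in sync.
    m.def("make_widget", []() { return std::make_shared<Widget>(); });
}
// --- end of sketch; cast.h continues below ---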
#ifdef PYBIND11_DISABLE_HANDLE_TYPE_NAME_DEFAULT_IMPLEMENTATION // See PR #4888

// This leads to compilation errors if a specialization is missing.
template <typename T>
struct handle_type_name;

#else

template <typename T>
struct handle_type_name {
    static constexpr auto name = const_name<T>();
};

#endif

template <>
struct handle_type_name<object> {
    static constexpr auto name = const_name("object");
};
template <>
struct handle_type_name<list> {
    static constexpr auto name = const_name("list");
};
template <>
struct handle_type_name<dict> {
    static constexpr auto name = const_name("dict");
};
template <>
struct handle_type_name<anyset> {
    static constexpr auto name = const_name("Union[set, frozenset]");
};
template <>
struct handle_type_name<set> {
    static constexpr auto name = const_name("set");
};
template <>
struct handle_type_name<frozenset> {
    static constexpr auto name = const_name("frozenset");
};
template <>
struct handle_type_name<str> {
    static constexpr auto name = const_name("str");
};
template <>
struct handle_type_name<tuple> {
    static constexpr auto name = const_name("tuple");
};
template <>
struct handle_type_name<bool_> {
    static constexpr auto name = const_name("bool");
};
template <>
struct handle_type_name<bytes> {
    static constexpr auto name = const_name(PYBIND11_BYTES_NAME);
};
template <>
struct handle_type_name<buffer> {
    static constexpr auto name = const_name("Buffer");
};
template <>
struct handle_type_name<int_> {
    static constexpr auto name = const_name("int");
};
template <>
struct handle_type_name<iterable> {
    static constexpr auto name = const_name("Iterable");
};
template <>
struct handle_type_name<iterator> {
    static constexpr auto name = const_name("Iterator");
};
template <>
struct handle_type_name<float_> {
    static constexpr auto name = const_name("float");
};
template <>
struct handle_type_name<function> {
    static constexpr auto name = const_name("Callable");
};
template <>
struct handle_type_name<handle> {
    static constexpr auto name = handle_type_name<object>::name;
};
template <>
struct handle_type_name<none> {
    static constexpr auto name = const_name("None");
};
template <>
struct handle_type_name<sequence> {
    static constexpr auto name = const_name("Sequence");
};
template <>
struct handle_type_name<bytearray> {
    static constexpr auto name = const_name("bytearray");
};
template <>
struct handle_type_name<memoryview> {
    static constexpr auto name = const_name("memoryview");
};
template <>
struct handle_type_name<slice> {
    static constexpr auto name = const_name("slice");
};
template <>
struct handle_type_name<type> {
    static constexpr auto name = const_name("type");
};
template <>
struct handle_type_name<capsule> {
    static constexpr auto name = const_name("capsule");
};
template <>
struct handle_type_name<ellipsis> {
    static constexpr auto name = const_name("ellipsis");
};
template <>
struct handle_type_name<weakref> {
    static constexpr auto name = const_name("weakref");
};
template <>
struct handle_type_name<args> {
    static constexpr auto name = const_name("*args");
};
template <>
struct handle_type_name<kwargs> {
    static constexpr auto name = const_name("**kwargs");
};
template <>
struct handle_type_name<obj_attr_accessor> {
    static constexpr auto name = const_name<obj_attr_accessor>();
};
template <>
struct handle_type_name<str_attr_accessor> {
    static constexpr auto name = const_name<str_attr_accessor>();
};
template <>
struct handle_type_name<item_accessor> {
    static constexpr auto name = const_name<item_accessor>();
};
template <>
struct handle_type_name<sequence_accessor> {
    static constexpr auto name = const_name<sequence_accessor>();
};
template <>
struct handle_type_name<list_accessor> {
    static constexpr auto name = const_name<list_accessor>();
};
template <>
struct handle_type_name<tuple_accessor> {
    static constexpr auto name = const_name<tuple_accessor>();
};

template <typename type>
struct pyobject_caster {
    template <typename T = type, enable_if_t<std::is_same<T, handle>::value, int> = 0>
    pyobject_caster() : value() {}

    // `type` may not be default constructible (e.g. frozenset, anyset). Initializing `value`
    // to a nil handle is safe since it will only be accessed if `load` succeeds.
    template <typename T = type, enable_if_t<std::is_base_of<object, T>::value, int> = 0>
    pyobject_caster() : value(reinterpret_steal<type>(handle())) {}

    template <typename T = type, enable_if_t<std::is_same<T, handle>::value, int> = 0>
    bool load(handle src, bool /* convert */) {
        value = src;
        return static_cast<bool>(value);
    }

    template <typename T = type, enable_if_t<std::is_base_of<object, T>::value, int> = 0>
    bool load(handle src, bool /* convert */) {
        if (!isinstance<type>(src)) {
            return false;
        }
        value = reinterpret_borrow<type>(src);
        return true;
    }

    static handle cast(const handle &src, return_value_policy /* policy */, handle /* parent */) {
        return src.inc_ref();
    }
    PYBIND11_TYPE_CASTER(type, handle_type_name<type>::name);
};

template <typename T>
class type_caster<T, enable_if_t<is_pyobject<T>::value>> : public pyobject_caster<T> {};

// Our conditions for enabling moving are quite restrictive:
//  At compile time:
//  - T needs to be a non-const, non-pointer, non-reference type
//  - type_caster<T>::operator T&() must exist
//  - the type must be move constructible (obviously)
//  At run-time:
//  - if the type is non-copy-constructible, the object must be the sole owner of the type (i.e. it
//    must have ref_count() == 1)
//  If any of the above are not satisfied, we fall back to copying.
template <typename T>
using move_is_plain_type
    = satisfies_none_of<T, std::is_void, std::is_pointer, std::is_reference, std::is_const>;
template <typename T, typename SFINAE = void>
struct move_always : std::false_type {};
template <typename T>
struct move_always<
    T,
    enable_if_t<
        all_of<move_is_plain_type<T>,
               negation<is_copy_constructible<T>>,
               is_move_constructible<T>,
               std::is_same<decltype(std::declval<make_caster<T>>().operator T &()), T &>>::value>>
    : std::true_type {};
template <typename T, typename SFINAE = void>
struct move_if_unreferenced : std::false_type {};
template <typename T>
struct move_if_unreferenced<
    T,
    enable_if_t<
        all_of<move_is_plain_type<T>,
               negation<move_always<T>>,
               is_move_constructible<T>,
               std::is_same<decltype(std::declval<make_caster<T>>().operator T &()), T &>>::value>>
    : std::true_type {};
template <typename T>
using move_never = none_of<move_always<T>, move_if_unreferenced<T>>;

// Detect whether returning a `type` from a cast on type's type_caster is going to result in a
// reference or pointer to a local variable of the type_caster. Basically, only
// non-reference/pointer `type`s and reference/pointers from a type_caster_generic are safe;
// everything else returns a reference/pointer to a local variable.
template <typename type>
using cast_is_temporary_value_reference
    = bool_constant<(std::is_reference<type>::value || std::is_pointer<type>::value)
                    && !std::is_base_of<type_caster_generic, make_caster<type>>::value
                    && !std::is_same<intrinsic_t<type>, void>::value>;

// When a value returned from a C++ function is being cast back to Python, we almost always want to
// force `policy = move`, regardless of the return value policy the function/method was declared
// with.
template <typename Return, typename SFINAE = void>
struct return_value_policy_override {
    static return_value_policy policy(return_value_policy p) { return p; }
};

template <typename Return>
struct return_value_policy_override<
    Return,
    detail::enable_if_t<std::is_base_of<type_caster_generic, make_caster<Return>>::value, void>> {
    static return_value_policy policy(return_value_policy p) {
        return !std::is_lvalue_reference<Return>::value && !std::is_pointer<Return>::value
                   ? return_value_policy::move
                   : p;
    }
};

// Basic python -> C++ casting; throws if casting fails
template <typename T, typename SFINAE>
type_caster<T, SFINAE> &load_type(type_caster<T, SFINAE> &conv, const handle &handle) {
    static_assert(!detail::is_pyobject<T>::value,
                  "Internal error: type_caster should only be used for C++ types");
    if (!conv.load(handle, true)) {
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
        throw cast_error(
            "Unable to cast Python instance of type "
            + str(type::handle_of(handle)).cast<std::string>()
            + " to C++ type '?' (#define "
              "PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for details)");
#else
        throw cast_error("Unable to cast Python instance of type "
                         + str(type::handle_of(handle)).cast<std::string>() + " to C++ type '"
                         + type_id<T>() + "'");
#endif
    }
    return conv;
}
// Wrapper around the above that also constructs and returns a type_caster
template <typename T>
make_caster<T> load_type(const handle &handle) {
    make_caster<T> conv;
    load_type(conv, handle);
    return conv;
}

PYBIND11_NAMESPACE_END(detail)

// pytype -> C++ type
template <typename T,
          detail::enable_if_t<!detail::is_pyobject<T>::value
                                  && !detail::is_same_ignoring_cvref<T, PyObject *>::value,
                              int>
          = 0>
T cast(const handle &handle) {
    using namespace detail;
    static_assert(!cast_is_temporary_value_reference<T>::value,
                  "Unable to cast type to reference: value is local to type caster");
    return cast_op<T>(load_type<T>(handle));
}

// pytype -> pytype (calls converting constructor)
template <typename T, detail::enable_if_t<detail::is_pyobject<T>::value, int> = 0>
T cast(const handle &handle) {
    return T(reinterpret_borrow<object>(handle));
}

// Note that `cast<PyObject *>(obj)` increments the reference count of `obj`.
// This is necessary for the case that `obj` is a temporary, and could
// not possibly be different, given
// 1. the established convention that the passed `handle` is borrowed, and
// 2. we don't want to force all generic code using `cast<T>()` to special-case
//    handling of `T` = `PyObject *` (to increment the reference count there).
// It is the responsibility of the caller to ensure that the reference count
// is decremented.
template <typename T,
          typename Handle,
          detail::enable_if_t<detail::is_same_ignoring_cvref<T, PyObject *>::value
                                  && detail::is_same_ignoring_cvref<Handle, handle>::value,
                              int>
          = 0>
T cast(Handle &&handle) {
    return handle.inc_ref().ptr();
}
// To optimize away an inc_ref/dec_ref cycle:
template <typename T,
          typename Object,
          detail::enable_if_t<detail::is_same_ignoring_cvref<T, PyObject *>::value
                                  && detail::is_same_ignoring_cvref<Object, object>::value,
                              int>
          = 0>
T cast(Object &&obj) {
    return obj.release().ptr();
}

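The two casting directions defined above meet in everyday embedding code; a small sketch (illustrative names, as before):

// --- usage sketch (not part of cast.h) ---
#include <pybind11/embed.h>
#include <cassert>

namespace py = pybind11;

int main() {
    py::scoped_interpreter guard{};
    // C++ -> Python: py::cast picks a return_value_policy automatically;
    // a by-value int resolves to return_value_policy::move.
    py::object num = py::cast(42);
    // Python -> C++: load_type + cast_op; a failed load throws cast_error.
    assert(num.cast<int>() == 42);
    bool threw = false;
    try {
        py::str("not a number").cast<int>();
    } catch (const py::cast_error &) {
        threw = true;
    }
    assert(threw);
    return 0;
}
// --- end of sketch; cast.h continues below ---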
1214 |
+
// C++ type -> py::object
|
1215 |
+
template <typename T, detail::enable_if_t<!detail::is_pyobject<T>::value, int> = 0>
|
1216 |
+
object cast(T &&value,
|
1217 |
+
return_value_policy policy = return_value_policy::automatic_reference,
|
1218 |
+
handle parent = handle()) {
|
1219 |
+
using no_ref_T = typename std::remove_reference<T>::type;
|
1220 |
+
if (policy == return_value_policy::automatic) {
|
1221 |
+
policy = std::is_pointer<no_ref_T>::value ? return_value_policy::take_ownership
|
1222 |
+
: std::is_lvalue_reference<T>::value ? return_value_policy::copy
|
1223 |
+
: return_value_policy::move;
|
1224 |
+
} else if (policy == return_value_policy::automatic_reference) {
|
1225 |
+
policy = std::is_pointer<no_ref_T>::value ? return_value_policy::reference
|
1226 |
+
: std::is_lvalue_reference<T>::value ? return_value_policy::copy
|
1227 |
+
: return_value_policy::move;
|
1228 |
+
}
|
1229 |
+
return reinterpret_steal<object>(
|
1230 |
+
detail::make_caster<T>::cast(std::forward<T>(value), policy, parent));
|
1231 |
+
}
|
1232 |
+
|
1233 |
+
template <typename T>
|
1234 |
+
T handle::cast() const {
|
1235 |
+
return pybind11::cast<T>(*this);
|
1236 |
+
}
|
1237 |
+
template <>
|
1238 |
+
inline void handle::cast() const {
|
1239 |
+
return;
|
1240 |
+
}
|
1241 |
+
|
1242 |
+
template <typename T>
|
1243 |
+
detail::enable_if_t<!detail::move_never<T>::value, T> move(object &&obj) {
|
1244 |
+
if (obj.ref_count() > 1) {
|
1245 |
+
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
|
1246 |
+
throw cast_error(
|
1247 |
+
"Unable to cast Python " + str(type::handle_of(obj)).cast<std::string>()
|
1248 |
+
+ " instance to C++ rvalue: instance has multiple references"
|
1249 |
+
" (#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for details)");
|
1250 |
+
#else
|
1251 |
+
throw cast_error("Unable to move from Python "
|
1252 |
+
+ str(type::handle_of(obj)).cast<std::string>() + " instance to C++ "
|
1253 |
+
+ type_id<T>() + " instance: instance has multiple references");
|
1254 |
+
#endif
|
1255 |
+
}
|
1256 |
+
|
1257 |
+
// Move into a temporary and return that, because the reference may be a local value of `conv`
|
1258 |
+
T ret = std::move(detail::load_type<T>(obj).operator T &());
|
1259 |
+
return ret;
|
1260 |
+
}
|
1261 |
+
|
1262 |
+
// Calling cast() on an rvalue calls pybind11::cast with the object rvalue, which does:
|
1263 |
+
// - If we have to move (because T has no copy constructor), do it. This will fail if the moved
|
1264 |
+
// object has multiple references, but trying to copy will fail to compile.
|
1265 |
+
// - If both movable and copyable, check ref count: if 1, move; otherwise copy
|
1266 |
+
// - Otherwise (not movable), copy.
|
1267 |
+
template <typename T>
|
1268 |
+
detail::enable_if_t<!detail::is_pyobject<T>::value && detail::move_always<T>::value, T>
|
1269 |
+
cast(object &&object) {
|
1270 |
+
return move<T>(std::move(object));
|
1271 |
+
}
|
1272 |
+
template <typename T>
|
1273 |
+
detail::enable_if_t<!detail::is_pyobject<T>::value && detail::move_if_unreferenced<T>::value, T>
|
1274 |
+
cast(object &&object) {
|
1275 |
+
if (object.ref_count() > 1) {
|
1276 |
+
return cast<T>(object);
|
1277 |
+
}
|
1278 |
+
return move<T>(std::move(object));
|
1279 |
+
}
|
1280 |
+
template <typename T>
|
1281 |
+
detail::enable_if_t<!detail::is_pyobject<T>::value && detail::move_never<T>::value, T>
|
1282 |
+
cast(object &&object) {
|
1283 |
+
return cast<T>(object);
|
1284 |
+
}
|
1285 |
+
|
1286 |
+
// pytype rvalue -> pytype (calls converting constructor)
|
1287 |
+
template <typename T>
|
1288 |
+
detail::enable_if_t<detail::is_pyobject<T>::value, T> cast(object &&object) {
|
1289 |
+
return T(std::move(object));
|
1290 |
+
}
|
1291 |
+
|
+template <typename T>
+T object::cast() const & {
+    return pybind11::cast<T>(*this);
+}
+template <typename T>
+T object::cast() && {
+    return pybind11::cast<T>(std::move(*this));
+}
+template <>
+inline void object::cast() const & {
+    return;
+}
+template <>
+inline void object::cast() && {
+    return;
+}
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+// Declared in pytypes.h:
+template <typename T, enable_if_t<!is_pyobject<T>::value, int>>
+object object_or_cast(T &&o) {
+    return pybind11::cast(std::forward<T>(o));
+}
+
+// Placeholder type for the unneeded (and dead code) static variable in the
+// PYBIND11_OVERRIDE_OVERRIDE macro
+struct override_unused {};
+template <typename ret_type>
+using override_caster_t = conditional_t<cast_is_temporary_value_reference<ret_type>::value,
+                                        make_caster<ret_type>,
+                                        override_unused>;
+
+// Trampoline use: for reference/pointer types to value-converted values, we do a value cast, then
+// store the result in the given variable. For other types, this is a no-op.
+template <typename T>
+enable_if_t<cast_is_temporary_value_reference<T>::value, T> cast_ref(object &&o,
+                                                                     make_caster<T> &caster) {
+    return cast_op<T>(load_type(caster, o));
+}
+template <typename T>
+enable_if_t<!cast_is_temporary_value_reference<T>::value, T> cast_ref(object &&,
+                                                                      override_unused &) {
+    pybind11_fail("Internal error: cast_ref fallback invoked");
+}
+
+// Trampoline use: Having a pybind11::cast with an invalid reference type is going to
+// static_assert, even if it's in dead code, so we provide a "trampoline" to pybind11::cast
+// that only does anything in cases where pybind11::cast is valid.
+template <typename T>
+enable_if_t<cast_is_temporary_value_reference<T>::value, T> cast_safe(object &&) {
+    pybind11_fail("Internal error: cast_safe fallback invoked");
+}
+template <typename T>
+enable_if_t<std::is_void<T>::value, void> cast_safe(object &&) {}
+template <typename T>
+enable_if_t<detail::none_of<cast_is_temporary_value_reference<T>, std::is_void<T>>::value, T>
+cast_safe(object &&o) {
+    return pybind11::cast<T>(std::move(o));
+}
+
+PYBIND11_NAMESPACE_END(detail)
+
+// The overloads could coexist, i.e. the #if is not strictly speaking needed,
+// but it is an easy minor optimization.
+#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
+inline cast_error cast_error_unable_to_convert_call_arg(const std::string &name) {
+    return cast_error("Unable to convert call argument '" + name
+                      + "' to Python object (#define "
+                        "PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for details)");
+}
+#else
+inline cast_error cast_error_unable_to_convert_call_arg(const std::string &name,
+                                                        const std::string &type) {
+    return cast_error("Unable to convert call argument '" + name + "' of type '" + type
+                      + "' to Python object");
+}
+#endif
+
+template <return_value_policy policy = return_value_policy::automatic_reference>
+tuple make_tuple() {
+    return tuple(0);
+}
+
+template <return_value_policy policy = return_value_policy::automatic_reference, typename... Args>
+tuple make_tuple(Args &&...args_) {
+    constexpr size_t size = sizeof...(Args);
+    std::array<object, size> args{{reinterpret_steal<object>(
+        detail::make_caster<Args>::cast(std::forward<Args>(args_), policy, nullptr))...}};
+    for (size_t i = 0; i < args.size(); i++) {
+        if (!args[i]) {
+#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
+            throw cast_error_unable_to_convert_call_arg(std::to_string(i));
+#else
+            std::array<std::string, size> argtypes{{type_id<Args>()...}};
+            throw cast_error_unable_to_convert_call_arg(std::to_string(i), argtypes[i]);
+#endif
+        }
+    }
+    tuple result(size);
+    int counter = 0;
+    for (auto &arg_value : args) {
+        PyTuple_SET_ITEM(result.ptr(), counter++, arg_value.release().ptr());
+    }
+    return result;
+}
+
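A quick usage sketch of `py::make_tuple` (assuming `namespace py = pybind11`): every argument is converted through its type caster under the given return value policy, and a failed conversion raises the `cast_error` built just above:

py::tuple t = py::make_tuple(1, 3.5, "hello");
int i = t[0].cast<int>();                 // 1
std::string s = t[2].cast<std::string>(); // "hello"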
+/// \ingroup annotations
+/// Annotation for arguments
+struct arg {
+    /// Constructs an argument with the name of the argument; if null or omitted, this is a
+    /// positional argument.
+    constexpr explicit arg(const char *name = nullptr)
+        : name(name), flag_noconvert(false), flag_none(true) {}
+    /// Assign a value to this argument
+    template <typename T>
+    arg_v operator=(T &&value) const;
+    /// Indicate that the type should not be converted in the type caster
+    arg &noconvert(bool flag = true) {
+        flag_noconvert = flag;
+        return *this;
+    }
+    /// Indicates that the argument should/shouldn't allow None (e.g. for nullable pointer args)
+    arg &none(bool flag = true) {
+        flag_none = flag;
+        return *this;
+    }
+
+    const char *name;        ///< If non-null, this is a named kwargs argument
+    bool flag_noconvert : 1; ///< If set, do not allow conversion (requires a supporting type
+                             ///< caster!)
+    bool flag_none : 1;      ///< If set (the default), allow None to be passed to this argument
+};
+
+/// \ingroup annotations
+/// Annotation for arguments with values
+struct arg_v : arg {
+private:
+    template <typename T>
+    arg_v(arg &&base, T &&x, const char *descr = nullptr)
+        : arg(base), value(reinterpret_steal<object>(detail::make_caster<T>::cast(
+                         std::forward<T>(x), return_value_policy::automatic, {}))),
+          descr(descr)
+#if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
+          ,
+          type(type_id<T>())
+#endif
+    {
+        // Workaround! See:
+        // https://github.com/pybind/pybind11/issues/2336
+        // https://github.com/pybind/pybind11/pull/2685#issuecomment-731286700
+        if (PyErr_Occurred()) {
+            PyErr_Clear();
+        }
+    }
+
+public:
+    /// Direct construction with name, default, and description
+    template <typename T>
+    arg_v(const char *name, T &&x, const char *descr = nullptr)
+        : arg_v(arg(name), std::forward<T>(x), descr) {}
+
+    /// Called internally when invoking `py::arg("a") = value`
+    template <typename T>
+    arg_v(const arg &base, T &&x, const char *descr = nullptr)
+        : arg_v(arg(base), std::forward<T>(x), descr) {}
+
+    /// Same as `arg::noconvert()`, but returns *this as arg_v&, not arg&
+    arg_v &noconvert(bool flag = true) {
+        arg::noconvert(flag);
+        return *this;
+    }
+
+    /// Same as `arg::none()`, but returns *this as arg_v&, not arg&
+    arg_v &none(bool flag = true) {
+        arg::none(flag);
+        return *this;
+    }
+
+    /// The default value
+    object value;
+    /// The (optional) description of the default value
+    const char *descr;
+#if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
+    /// The C++ type name of the default value (only available when compiled in debug mode)
+    std::string type;
+#endif
+};
+
+/// \ingroup annotations
+/// Annotation indicating that all following arguments are keyword-only; this is the equivalent
+/// of an unnamed '*' argument
+struct kw_only {};
+
+/// \ingroup annotations
+/// Annotation indicating that all previous arguments are positional-only; this is the equivalent
+/// of an unnamed '/' argument (in Python 3.8)
+struct pos_only {};
+
+template <typename T>
+arg_v arg::operator=(T &&value) const {
+    return {*this, std::forward<T>(value)};
+}
+
+/// Alias for backward compatibility -- to be removed in version 2.0
+template <typename /*unused*/>
+using arg_t = arg_v;
+
+inline namespace literals {
+/** \rst
+    String literal version of `arg`
+ \endrst */
+constexpr arg
+#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ < 5
+operator"" _a // gcc 4.8.5 insists on having a space (hard error).
+#else
+operator""_a // clang 17 generates a deprecation warning if there is a space.
+#endif
+    (const char *name, size_t) {
+    return arg(name);
+}
+} // namespace literals
+
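A binding sketch tying the annotations above together (the module handle `m` and the bound lambdas are illustrative; `pos_only` is used the same way as `kw_only` but marks the arguments before it):

namespace py = pybind11;
using namespace py::literals;

// "j"_a is shorthand for py::arg("j"); operator= produces an arg_v default.
m.def("add", [](int i, int j) { return i + j; }, "i"_a, "j"_a = 1);

// noconvert() sets flag_noconvert, so a Python int is rejected here:
m.def("exp", [](double x) { return std::exp(x); }, py::arg("x").noconvert());

// kw_only() corresponds to the bare '*' in a Python signature:
m.def("sub", [](int a, int b) { return a - b; },
      py::arg("a"), py::kw_only(), py::arg("b"));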
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+template <typename T>
+using is_kw_only = std::is_same<intrinsic_t<T>, kw_only>;
+template <typename T>
+using is_pos_only = std::is_same<intrinsic_t<T>, pos_only>;
+
+// forward declaration (definition in attr.h)
+struct function_record;
+
+/// Internal data associated with a single function call
+struct function_call {
+    function_call(const function_record &f, handle p); // Implementation in attr.h
+
+    /// The function data:
+    const function_record &func;
+
+    /// Arguments passed to the function:
+    std::vector<handle> args;
+
+    /// The `convert` value the arguments should be loaded with
+    std::vector<bool> args_convert;
+
+    /// Extra references for the optional `py::args` and/or `py::kwargs` arguments (which, if
+    /// present, are also in `args` but without a reference).
+    object args_ref, kwargs_ref;
+
+    /// The parent, if any
+    handle parent;
+
+    /// If this is a call to an initializer, this argument contains `self`
+    handle init_self;
+};
+
+/// Helper class which loads arguments for C++ functions called from Python
+template <typename... Args>
+class argument_loader {
+    using indices = make_index_sequence<sizeof...(Args)>;
+
+    template <typename Arg>
+    using argument_is_args = std::is_same<intrinsic_t<Arg>, args>;
+    template <typename Arg>
+    using argument_is_kwargs = std::is_same<intrinsic_t<Arg>, kwargs>;
+    // Get kwargs argument position, or -1 if not present:
+    static constexpr auto kwargs_pos = constexpr_last<argument_is_kwargs, Args...>();
+
+    static_assert(kwargs_pos == -1 || kwargs_pos == (int) sizeof...(Args) - 1,
+                  "py::kwargs is only permitted as the last argument of a function");
+
+public:
+    static constexpr bool has_kwargs = kwargs_pos != -1;
+
+    // py::args argument position; -1 if not present.
+    static constexpr int args_pos = constexpr_last<argument_is_args, Args...>();
+
+    static_assert(args_pos == -1 || args_pos == constexpr_first<argument_is_args, Args...>(),
+                  "py::args cannot be specified more than once");
+
+    static constexpr auto arg_names
+        = ::pybind11::detail::concat(type_descr(make_caster<Args>::name)...);
+
+    bool load_args(function_call &call) { return load_impl_sequence(call, indices{}); }
+
+    template <typename Return, typename Guard, typename Func>
+    // NOLINTNEXTLINE(readability-const-return-type)
+    enable_if_t<!std::is_void<Return>::value, Return> call(Func &&f) && {
+        return std::move(*this).template call_impl<remove_cv_t<Return>>(
+            std::forward<Func>(f), indices{}, Guard{});
+    }
+
+    template <typename Return, typename Guard, typename Func>
+    enable_if_t<std::is_void<Return>::value, void_type> call(Func &&f) && {
+        std::move(*this).template call_impl<remove_cv_t<Return>>(
+            std::forward<Func>(f), indices{}, Guard{});
+        return void_type();
+    }
+
+private:
+    static bool load_impl_sequence(function_call &, index_sequence<>) { return true; }
+
+    template <size_t... Is>
+    bool load_impl_sequence(function_call &call, index_sequence<Is...>) {
+#ifdef __cpp_fold_expressions
+        if ((... || !std::get<Is>(argcasters).load(call.args[Is], call.args_convert[Is]))) {
+            return false;
+        }
+#else
+        for (bool r : {std::get<Is>(argcasters).load(call.args[Is], call.args_convert[Is])...}) {
+            if (!r) {
+                return false;
+            }
+        }
+#endif
+        return true;
+    }
+
+    template <typename Return, typename Func, size_t... Is, typename Guard>
+    Return call_impl(Func &&f, index_sequence<Is...>, Guard &&) && {
+        return std::forward<Func>(f)(cast_op<Args>(std::move(std::get<Is>(argcasters)))...);
+    }
+
+    std::tuple<make_caster<Args>...> argcasters;
+};
+
+/// Helper class which collects only positional arguments for a Python function call.
+/// A fancier version below can collect any argument, but this one is optimal for simple calls.
+template <return_value_policy policy>
+class simple_collector {
+public:
+    template <typename... Ts>
+    explicit simple_collector(Ts &&...values)
+        : m_args(pybind11::make_tuple<policy>(std::forward<Ts>(values)...)) {}
+
+    const tuple &args() const & { return m_args; }
+    dict kwargs() const { return {}; }
+
+    tuple args() && { return std::move(m_args); }
+
+    /// Call a Python function and pass the collected arguments
+    object call(PyObject *ptr) const {
+        PyObject *result = PyObject_CallObject(ptr, m_args.ptr());
+        if (!result) {
+            throw error_already_set();
+        }
+        return reinterpret_steal<object>(result);
+    }
+
+private:
+    tuple m_args;
+};
+
+/// Helper class which collects positional, keyword, * and ** arguments for a Python function call
+template <return_value_policy policy>
+class unpacking_collector {
+public:
+    template <typename... Ts>
+    explicit unpacking_collector(Ts &&...values) {
+        // Tuples aren't (easily) resizable so a list is needed for collection,
+        // but the actual function call strictly requires a tuple.
+        auto args_list = list();
+        using expander = int[];
+        (void) expander{0, (process(args_list, std::forward<Ts>(values)), 0)...};
+
+        m_args = std::move(args_list);
+    }
+
+    const tuple &args() const & { return m_args; }
+    const dict &kwargs() const & { return m_kwargs; }
+
+    tuple args() && { return std::move(m_args); }
+    dict kwargs() && { return std::move(m_kwargs); }
+
+    /// Call a Python function and pass the collected arguments
+    object call(PyObject *ptr) const {
+        PyObject *result = PyObject_Call(ptr, m_args.ptr(), m_kwargs.ptr());
+        if (!result) {
+            throw error_already_set();
+        }
+        return reinterpret_steal<object>(result);
+    }
+
+private:
+    template <typename T>
+    void process(list &args_list, T &&x) {
+        auto o = reinterpret_steal<object>(
+            detail::make_caster<T>::cast(std::forward<T>(x), policy, {}));
+        if (!o) {
+#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
+            throw cast_error_unable_to_convert_call_arg(std::to_string(args_list.size()));
+#else
+            throw cast_error_unable_to_convert_call_arg(std::to_string(args_list.size()),
+                                                        type_id<T>());
+#endif
+        }
+        args_list.append(std::move(o));
+    }
+
+    void process(list &args_list, detail::args_proxy ap) {
+        for (auto a : ap) {
+            args_list.append(a);
+        }
+    }
+
+    void process(list & /*args_list*/, arg_v a) {
+        if (!a.name) {
+#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
+            nameless_argument_error();
+#else
+            nameless_argument_error(a.type);
+#endif
+        }
+        if (m_kwargs.contains(a.name)) {
+#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
+            multiple_values_error();
+#else
+            multiple_values_error(a.name);
+#endif
+        }
+        if (!a.value) {
+#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
+            throw cast_error_unable_to_convert_call_arg(a.name);
+#else
+            throw cast_error_unable_to_convert_call_arg(a.name, a.type);
+#endif
+        }
+        m_kwargs[a.name] = std::move(a.value);
+    }
+
+    void process(list & /*args_list*/, detail::kwargs_proxy kp) {
+        if (!kp) {
+            return;
+        }
+        for (auto k : reinterpret_borrow<dict>(kp)) {
+            if (m_kwargs.contains(k.first)) {
+#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES)
+                multiple_values_error();
+#else
+                multiple_values_error(str(k.first));
+#endif
+            }
+            m_kwargs[k.first] = k.second;
+        }
+    }
+
+    [[noreturn]] static void nameless_argument_error() {
+        throw type_error(
+            "Got kwargs without a name; only named arguments "
+            "may be passed via py::arg() to a python function call. "
+            "(#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for details)");
+    }
+    [[noreturn]] static void nameless_argument_error(const std::string &type) {
+        throw type_error("Got kwargs without a name of type '" + type
+                         + "'; only named "
+                           "arguments may be passed via py::arg() to a python function call. ");
+    }
+    [[noreturn]] static void multiple_values_error() {
+        throw type_error(
+            "Got multiple values for keyword argument "
+            "(#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for details)");
+    }
+
+    [[noreturn]] static void multiple_values_error(const std::string &name) {
+        throw type_error("Got multiple values for keyword argument '" + name + "'");
+    }
+
+private:
+    tuple m_args;
+    dict m_kwargs;
+};
+
+// [workaround(intel)] Separate function required here
+// We need to put this into a separate function because the Intel compiler
+// fails to compile enable_if_t<!all_of<is_positional<Args>...>::value>
+// (tested with ICC 2021.1 Beta 20200827).
+template <typename... Args>
+constexpr bool args_are_all_positional() {
+    return all_of<is_positional<Args>...>::value;
+}
+
+/// Collect only positional arguments for a Python function call
+template <return_value_policy policy,
+          typename... Args,
+          typename = enable_if_t<args_are_all_positional<Args...>()>>
+simple_collector<policy> collect_arguments(Args &&...args) {
+    return simple_collector<policy>(std::forward<Args>(args)...);
+}
+
+/// Collect all arguments, including keywords and unpacking (only instantiated when needed)
+template <return_value_policy policy,
+          typename... Args,
+          typename = enable_if_t<!args_are_all_positional<Args...>()>>
+unpacking_collector<policy> collect_arguments(Args &&...args) {
+    // Following argument order rules for generalized unpacking according to PEP 448
+    static_assert(constexpr_last<is_positional, Args...>()
+                          < constexpr_first<is_keyword_or_ds, Args...>()
+                      && constexpr_last<is_s_unpacking, Args...>()
+                             < constexpr_first<is_ds_unpacking, Args...>(),
+                  "Invalid function call: positional args must precede keywords and ** unpacking; "
+                  "* unpacking must precede ** unpacking");
+    return unpacking_collector<policy>(std::forward<Args>(args)...);
+}
+
+template <typename Derived>
+template <return_value_policy policy, typename... Args>
+object object_api<Derived>::operator()(Args &&...args) const {
+#ifndef NDEBUG
+    if (!PyGILState_Check()) {
+        pybind11_fail("pybind11::object_api<>::operator() PyGILState_Check() failure.");
+    }
+#endif
+    return detail::collect_arguments<policy>(std::forward<Args>(args)...).call(derived().ptr());
+}
+
+template <typename Derived>
+template <return_value_policy policy, typename... Args>
+object object_api<Derived>::call(Args &&...args) const {
+    return operator()<policy>(std::forward<Args>(args)...);
+}
+
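These collectors are what `object_api::operator()` dispatches to; a short calling sketch from C++ (using `builtins.min` as an arbitrary target):

namespace py = pybind11;
using namespace py::literals;

py::object min_ = py::module_::import("builtins").attr("min");

// All-positional call: simple_collector / PyObject_CallObject
py::object r1 = min_(3, 1, 2);                                    // -> 1

// Keyword argument present: unpacking_collector / PyObject_Call
py::object r2 = min_(py::make_tuple(), "default"_a = py::none()); // -> None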
+PYBIND11_NAMESPACE_END(detail)
+
+template <typename T>
+handle type::handle_of() {
+    static_assert(std::is_base_of<detail::type_caster_generic, detail::make_caster<T>>::value,
+                  "py::type::of<T> only supports the case where T is a registered C++ type.");
+
+    return detail::get_type_handle(typeid(T), true);
+}
+
+#define PYBIND11_MAKE_OPAQUE(...)                                             \
+    PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)                              \
+    namespace detail {                                                        \
+    template <>                                                               \
+    class type_caster<__VA_ARGS__> : public type_caster_base<__VA_ARGS__> {}; \
+    }                                                                         \
+    PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
+
+/// Lets you pass a type containing a `,` through a macro parameter without needing a separate
+/// typedef, e.g.:
+/// `PYBIND11_OVERRIDE(PYBIND11_TYPE(ReturnType<A, B>), PYBIND11_TYPE(Parent<C, D>), f, arg)`
+#define PYBIND11_TYPE(...) __VA_ARGS__
+
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
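A usage sketch for the two macros that close this header (the types shown are illustrative). `PYBIND11_MAKE_OPAQUE` is variadic, so an embedded comma is already safe there; `PYBIND11_TYPE` exists for macros such as `PYBIND11_OVERRIDE`, whose other parameters would otherwise split a templated type at the comma:

#include <pybind11/pybind11.h>
#include <map>
#include <string>

// Specializes type_caster to type_caster_base, so the map is treated as a
// regular bound class (passed by reference) instead of being converted.
PYBIND11_MAKE_OPAQUE(std::map<std::string, int>)

// Inside a trampoline class, shield commas in the return/class types:
//   PYBIND11_OVERRIDE(PYBIND11_TYPE(std::map<std::string, int>), Base, lookup);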
venv/lib/python3.10/site-packages/torch/include/pybind11/chrono.h
ADDED
@@ -0,0 +1,225 @@
+/*
+    pybind11/chrono.h: Transparent conversion between std::chrono and python's datetime
+
+    Copyright (c) 2016 Trent Houliston <[email protected]> and
+                       Wenzel Jakob <[email protected]>
+
+    All rights reserved. Use of this source code is governed by a
+    BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "pybind11.h"
+
+#include <chrono>
+#include <cmath>
+#include <ctime>
+#include <datetime.h>
+#include <mutex>
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+template <typename type>
+class duration_caster {
+public:
+    using rep = typename type::rep;
+    using period = typename type::period;
+
+    // signed 25 bits required by the standard.
+    using days = std::chrono::duration<int_least32_t, std::ratio<86400>>;
+
+    bool load(handle src, bool) {
+        using namespace std::chrono;
+
+        // Lazy initialise the PyDateTime import
+        if (!PyDateTimeAPI) {
+            PyDateTime_IMPORT;
+        }
+
+        if (!src) {
+            return false;
+        }
+        // If invoked with a datetime.timedelta object
+        if (PyDelta_Check(src.ptr())) {
+            value = type(duration_cast<duration<rep, period>>(
+                days(PyDateTime_DELTA_GET_DAYS(src.ptr()))
+                + seconds(PyDateTime_DELTA_GET_SECONDS(src.ptr()))
+                + microseconds(PyDateTime_DELTA_GET_MICROSECONDS(src.ptr()))));
+            return true;
+        }
+        // If invoked with a float we assume it is seconds and convert
+        if (PyFloat_Check(src.ptr())) {
+            value = type(duration_cast<duration<rep, period>>(
+                duration<double>(PyFloat_AsDouble(src.ptr()))));
+            return true;
+        }
+        return false;
+    }
+
+    // If this is a duration just return it back
+    static const std::chrono::duration<rep, period> &
+    get_duration(const std::chrono::duration<rep, period> &src) {
+        return src;
+    }
+
+    // If this is a time_point get the time_since_epoch
+    template <typename Clock>
+    static std::chrono::duration<rep, period>
+    get_duration(const std::chrono::time_point<Clock, std::chrono::duration<rep, period>> &src) {
+        return src.time_since_epoch();
+    }
+
+    static handle cast(const type &src, return_value_policy /* policy */, handle /* parent */) {
+        using namespace std::chrono;
+
+        // Use overloaded function to get our duration from our source
+        // Works out if it is a duration or time_point and gets the duration
+        auto d = get_duration(src);
+
+        // Lazy initialise the PyDateTime import
+        if (!PyDateTimeAPI) {
+            PyDateTime_IMPORT;
+        }
+
+        // Declare these special duration types so the conversions happen with the correct
+        // primitive types (int)
+        using dd_t = duration<int, std::ratio<86400>>;
+        using ss_t = duration<int, std::ratio<1>>;
+        using us_t = duration<int, std::micro>;
+
+        auto dd = duration_cast<dd_t>(d);
+        auto subd = d - dd;
+        auto ss = duration_cast<ss_t>(subd);
+        auto us = duration_cast<us_t>(subd - ss);
+        return PyDelta_FromDSU(dd.count(), ss.count(), us.count());
+    }
+
+    PYBIND11_TYPE_CASTER(type, const_name("datetime.timedelta"));
+};
+
+inline std::tm *localtime_thread_safe(const std::time_t *time, std::tm *buf) {
+#if (defined(__STDC_LIB_EXT1__) && defined(__STDC_WANT_LIB_EXT1__)) || defined(_MSC_VER)
+    if (localtime_s(buf, time))
+        return nullptr;
+    return buf;
+#else
+    static std::mutex mtx;
+    std::lock_guard<std::mutex> lock(mtx);
+    std::tm *tm_ptr = std::localtime(time);
+    if (tm_ptr != nullptr) {
+        *buf = *tm_ptr;
+    }
+    return tm_ptr;
+#endif
+}
+
+// This is for casting times on the system clock into datetime.datetime instances
+template <typename Duration>
+class type_caster<std::chrono::time_point<std::chrono::system_clock, Duration>> {
+public:
+    using type = std::chrono::time_point<std::chrono::system_clock, Duration>;
+    bool load(handle src, bool) {
+        using namespace std::chrono;
+
+        // Lazy initialise the PyDateTime import
+        if (!PyDateTimeAPI) {
+            PyDateTime_IMPORT;
+        }
+
+        if (!src) {
+            return false;
+        }
+
+        std::tm cal;
+        microseconds msecs;
+
+        if (PyDateTime_Check(src.ptr())) {
+            cal.tm_sec = PyDateTime_DATE_GET_SECOND(src.ptr());
+            cal.tm_min = PyDateTime_DATE_GET_MINUTE(src.ptr());
+            cal.tm_hour = PyDateTime_DATE_GET_HOUR(src.ptr());
+            cal.tm_mday = PyDateTime_GET_DAY(src.ptr());
+            cal.tm_mon = PyDateTime_GET_MONTH(src.ptr()) - 1;
+            cal.tm_year = PyDateTime_GET_YEAR(src.ptr()) - 1900;
+            cal.tm_isdst = -1;
+            msecs = microseconds(PyDateTime_DATE_GET_MICROSECOND(src.ptr()));
+        } else if (PyDate_Check(src.ptr())) {
+            cal.tm_sec = 0;
+            cal.tm_min = 0;
+            cal.tm_hour = 0;
+            cal.tm_mday = PyDateTime_GET_DAY(src.ptr());
+            cal.tm_mon = PyDateTime_GET_MONTH(src.ptr()) - 1;
+            cal.tm_year = PyDateTime_GET_YEAR(src.ptr()) - 1900;
+            cal.tm_isdst = -1;
+            msecs = microseconds(0);
+        } else if (PyTime_Check(src.ptr())) {
+            cal.tm_sec = PyDateTime_TIME_GET_SECOND(src.ptr());
+            cal.tm_min = PyDateTime_TIME_GET_MINUTE(src.ptr());
+            cal.tm_hour = PyDateTime_TIME_GET_HOUR(src.ptr());
+            cal.tm_mday = 1;  // This date (day, month, year) = (1, 0, 70)
+            cal.tm_mon = 0;   // represents 1-Jan-1970, which is the
+            cal.tm_year = 70; // earliest available date for Python's datetime
+            cal.tm_isdst = -1;
+            msecs = microseconds(PyDateTime_TIME_GET_MICROSECOND(src.ptr()));
+        } else {
+            return false;
+        }
+
+        value = time_point_cast<Duration>(system_clock::from_time_t(std::mktime(&cal)) + msecs);
+        return true;
+    }
+
+    static handle cast(const std::chrono::time_point<std::chrono::system_clock, Duration> &src,
+                       return_value_policy /* policy */,
+                       handle /* parent */) {
+        using namespace std::chrono;
+
+        // Lazy initialise the PyDateTime import
+        if (!PyDateTimeAPI) {
+            PyDateTime_IMPORT;
+        }
+
+        // Get our microseconds, and make sure they are positive, to avoid bug in eastern
+        // hemisphere time zones (cfr. https://github.com/pybind/pybind11/issues/2417)
+        using us_t = duration<int, std::micro>;
+        auto us = duration_cast<us_t>(src.time_since_epoch() % seconds(1));
+        if (us.count() < 0) {
+            us += seconds(1);
+        }
+
+        // Subtract microseconds BEFORE `system_clock::to_time_t`, because:
+        // > If std::time_t has lower precision, it is implementation-defined whether the value is
+        // rounded or truncated. (https://en.cppreference.com/w/cpp/chrono/system_clock/to_time_t)
+        std::time_t tt
+            = system_clock::to_time_t(time_point_cast<system_clock::duration>(src - us));
+
+        std::tm localtime;
+        std::tm *localtime_ptr = localtime_thread_safe(&tt, &localtime);
+        if (!localtime_ptr) {
+            throw cast_error("Unable to represent system_clock in local time");
+        }
+        return PyDateTime_FromDateAndTime(localtime.tm_year + 1900,
+                                          localtime.tm_mon + 1,
+                                          localtime.tm_mday,
+                                          localtime.tm_hour,
+                                          localtime.tm_min,
+                                          localtime.tm_sec,
+                                          us.count());
+    }
+    PYBIND11_TYPE_CASTER(type, const_name("datetime.datetime"));
+};
+
+// Other clocks that are not the system clock are not measured as datetime.datetime objects
+// since they are not measured on calendar time. So instead we just make them timedeltas
+// Or if they have passed us a time as a float we convert that
+template <typename Clock, typename Duration>
+class type_caster<std::chrono::time_point<Clock, Duration>>
+    : public duration_caster<std::chrono::time_point<Clock, Duration>> {};
+
+template <typename Rep, typename Period>
+class type_caster<std::chrono::duration<Rep, Period>>
+    : public duration_caster<std::chrono::duration<Rep, Period>> {};
+
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
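A minimal binding sketch for this header's casters (module name `demo` is illustrative): any `std::chrono::duration` accepts a `datetime.timedelta` or a plain float of seconds, and `system_clock` time points round-trip as local `datetime.datetime`:

#include <pybind11/pybind11.h>
#include <pybind11/chrono.h>
#include <chrono>

namespace py = pybind11;

PYBIND11_MODULE(demo, m) {
    // Python: demo.as_ms(datetime.timedelta(seconds=2)) or demo.as_ms(2.0) -> 2000
    m.def("as_ms", [](std::chrono::milliseconds d) { return d.count(); });

    // Python: demo.now() -> datetime.datetime in local time
    m.def("now", []() { return std::chrono::system_clock::now(); });
}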
venv/lib/python3.10/site-packages/torch/include/pybind11/common.h
ADDED
@@ -0,0 +1,2 @@
+#include "detail/common.h"
+#warning "Including 'common.h' is deprecated. It will be removed in v3.0. Use 'pybind11.h'."
venv/lib/python3.10/site-packages/torch/include/pybind11/complex.h
ADDED
@@ -0,0 +1,74 @@
+/*
+    pybind11/complex.h: Complex number support
+
+    Copyright (c) 2016 Wenzel Jakob <[email protected]>
+
+    All rights reserved. Use of this source code is governed by a
+    BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "pybind11.h"
+
+#include <complex>
+
+/// glibc defines I as a macro which breaks things, e.g., boost template names
+#ifdef I
+# undef I
+#endif
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+
+template <typename T>
+struct format_descriptor<std::complex<T>, detail::enable_if_t<std::is_floating_point<T>::value>> {
+    static constexpr const char c = format_descriptor<T>::c;
+    static constexpr const char value[3] = {'Z', c, '\0'};
+    static std::string format() { return std::string(value); }
+};
+
+#ifndef PYBIND11_CPP17
+
+template <typename T>
+constexpr const char
+    format_descriptor<std::complex<T>,
+                      detail::enable_if_t<std::is_floating_point<T>::value>>::value[3];
+
+#endif
+
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+template <typename T>
+struct is_fmt_numeric<std::complex<T>, detail::enable_if_t<std::is_floating_point<T>::value>> {
+    static constexpr bool value = true;
+    static constexpr int index = is_fmt_numeric<T>::index + 3;
+};
+
+template <typename T>
+class type_caster<std::complex<T>> {
+public:
+    bool load(handle src, bool convert) {
+        if (!src) {
+            return false;
+        }
+        if (!convert && !PyComplex_Check(src.ptr())) {
+            return false;
+        }
+        Py_complex result = PyComplex_AsCComplex(src.ptr());
+        if (result.real == -1.0 && PyErr_Occurred()) {
+            PyErr_Clear();
+            return false;
+        }
+        value = std::complex<T>((T) result.real, (T) result.imag);
+        return true;
+    }
+
+    static handle
+    cast(const std::complex<T> &src, return_value_policy /* policy */, handle /* parent */) {
+        return PyComplex_FromDoubles((double) src.real(), (double) src.imag());
+    }
+
+    PYBIND11_TYPE_CASTER(std::complex<T>, const_name("complex"));
+};
+PYBIND11_NAMESPACE_END(detail)
+PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
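A short sketch of what this caster enables (module name `demo` is illustrative):

#include <pybind11/pybind11.h>
#include <pybind11/complex.h>
#include <complex>

namespace py = pybind11;

PYBIND11_MODULE(demo, m) {
    // Python: demo.conj(1+2j) -> (1-2j); ints and floats also convert when
    // implicit conversion is allowed, via PyComplex_AsCComplex in load().
    m.def("conj", [](std::complex<double> z) { return std::conj(z); });
}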
venv/lib/python3.10/site-packages/torch/include/pybind11/detail/class.h
ADDED
@@ -0,0 +1,748 @@
+/*
+    pybind11/detail/class.h: Python C API implementation details for py::class_
+
+    Copyright (c) 2017 Wenzel Jakob <[email protected]>
+
+    All rights reserved. Use of this source code is governed by a
+    BSD-style license that can be found in the LICENSE file.
+*/
+
+#pragma once
+
+#include "../attr.h"
+#include "../options.h"
+
+PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
+PYBIND11_NAMESPACE_BEGIN(detail)
+
+#if !defined(PYPY_VERSION)
+# define PYBIND11_BUILTIN_QUALNAME
+# define PYBIND11_SET_OLDPY_QUALNAME(obj, nameobj)
+#else
+// In PyPy, we still set __qualname__ so that we can produce reliable function type
+// signatures; in CPython this macro expands to nothing:
+# define PYBIND11_SET_OLDPY_QUALNAME(obj, nameobj) \
+     setattr((PyObject *) obj, "__qualname__", nameobj)
+#endif
+
+inline std::string get_fully_qualified_tp_name(PyTypeObject *type) {
+#if !defined(PYPY_VERSION)
+    return type->tp_name;
+#else
+    auto module_name = handle((PyObject *) type).attr("__module__").cast<std::string>();
+    if (module_name == PYBIND11_BUILTINS_MODULE)
+        return type->tp_name;
+    else
+        return std::move(module_name) + "." + type->tp_name;
+#endif
+}
+
+inline PyTypeObject *type_incref(PyTypeObject *type) {
+    Py_INCREF(type);
+    return type;
+}
+
+#if !defined(PYPY_VERSION)
+
+/// `pybind11_static_property.__get__()`: Always pass the class instead of the instance.
+extern "C" inline PyObject *pybind11_static_get(PyObject *self, PyObject * /*ob*/, PyObject *cls) {
+    return PyProperty_Type.tp_descr_get(self, cls, cls);
+}
+
+/// `pybind11_static_property.__set__()`: Just like the above `__get__()`.
+extern "C" inline int pybind11_static_set(PyObject *self, PyObject *obj, PyObject *value) {
+    PyObject *cls = PyType_Check(obj) ? obj : (PyObject *) Py_TYPE(obj);
+    return PyProperty_Type.tp_descr_set(self, cls, value);
+}
+
+// Forward declaration to use in `make_static_property_type()`
+inline void enable_dynamic_attributes(PyHeapTypeObject *heap_type);
+
+/** A `static_property` is the same as a `property` but the `__get__()` and `__set__()`
+    methods are modified to always use the object type instead of a concrete instance.
+    Return value: New reference. */
+inline PyTypeObject *make_static_property_type() {
+    constexpr auto *name = "pybind11_static_property";
+    auto name_obj = reinterpret_steal<object>(PYBIND11_FROM_STRING(name));
+
+    /* Danger zone: from now (and until PyType_Ready), make sure to
+       issue no Python C API calls which could potentially invoke the
+       garbage collector (the GC will call type_traverse(), which will in
+       turn find the newly constructed type in an invalid state) */
+    auto *heap_type = (PyHeapTypeObject *) PyType_Type.tp_alloc(&PyType_Type, 0);
+    if (!heap_type) {
+        pybind11_fail("make_static_property_type(): error allocating type!");
+    }
+
+    heap_type->ht_name = name_obj.inc_ref().ptr();
+# ifdef PYBIND11_BUILTIN_QUALNAME
+    heap_type->ht_qualname = name_obj.inc_ref().ptr();
+# endif
+
+    auto *type = &heap_type->ht_type;
+    type->tp_name = name;
+    type->tp_base = type_incref(&PyProperty_Type);
+    type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;
+    type->tp_descr_get = pybind11_static_get;
+    type->tp_descr_set = pybind11_static_set;
+
+# if PY_VERSION_HEX >= 0x030C0000
+    // Since Python-3.12 property-derived types are required to
+    // have dynamic attributes (to set `__doc__`)
+    enable_dynamic_attributes(heap_type);
+# endif
+
+    if (PyType_Ready(type) < 0) {
+        pybind11_fail("make_static_property_type(): failure in PyType_Ready()!");
+    }
+
+    setattr((PyObject *) type, "__module__", str("pybind11_builtins"));
+    PYBIND11_SET_OLDPY_QUALNAME(type, name_obj);
+
+    return type;
+}
+
+#else // PYPY
+
+/** PyPy has some issues with the above C API, so we evaluate Python code instead.
+    This function will only be called once so performance isn't really a concern.
+    Return value: New reference. */
+inline PyTypeObject *make_static_property_type() {
+    auto d = dict();
+    PyObject *result = PyRun_String(R"(\
+class pybind11_static_property(property):
+    def __get__(self, obj, cls):
+        return property.__get__(self, cls, cls)
+
+    def __set__(self, obj, value):
+        cls = obj if isinstance(obj, type) else type(obj)
+        property.__set__(self, cls, value)
+)",
+                                    Py_file_input,
+                                    d.ptr(),
+                                    d.ptr());
+    if (result == nullptr)
+        throw error_already_set();
+    Py_DECREF(result);
+    return (PyTypeObject *) d["pybind11_static_property"].cast<object>().release().ptr();
+}
+
+#endif // PYPY
+
+/** Types with static properties need to handle `Type.static_prop = x` in a specific way.
+    By default, Python replaces the `static_property` itself, but for wrapped C++ types
+    we need to call `static_property.__set__()` in order to propagate the new value to
+    the underlying C++ data structure. */
+extern "C" inline int pybind11_meta_setattro(PyObject *obj, PyObject *name, PyObject *value) {
+    // Use `_PyType_Lookup()` instead of `PyObject_GetAttr()` in order to get the raw
+    // descriptor (`property`) instead of calling `tp_descr_get` (`property.__get__()`).
+    PyObject *descr = _PyType_Lookup((PyTypeObject *) obj, name);
+
+    // The following assignment combinations are possible:
+    // 1. `Type.static_prop = value`             --> descr_set: `Type.static_prop.__set__(value)`
+    // 2. `Type.static_prop = other_static_prop` --> setattro: replace existing `static_prop`
+    // 3. `Type.regular_attribute = value`       --> setattro: regular attribute assignment
+    auto *const static_prop = (PyObject *) get_internals().static_property_type;
+    const auto call_descr_set = (descr != nullptr) && (value != nullptr)
+                                && (PyObject_IsInstance(descr, static_prop) != 0)
+                                && (PyObject_IsInstance(value, static_prop) == 0);
+    if (call_descr_set) {
+        // Call `static_property.__set__()` instead of replacing the `static_property`.
+#if !defined(PYPY_VERSION)
+        return Py_TYPE(descr)->tp_descr_set(descr, obj, value);
+#else
+        if (PyObject *result = PyObject_CallMethod(descr, "__set__", "OO", obj, value)) {
+            Py_DECREF(result);
+            return 0;
+        } else {
+            return -1;
+        }
+#endif
+    } else {
+        // Replace existing attribute.
+        return PyType_Type.tp_setattro(obj, name, value);
+    }
+}
+
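The effect of this setattro hook, sketched with a hypothetical bound class `Cat`:

struct Cat { static int count; };
int Cat::count = 0;

// py::class_<Cat>(m, "Cat").def_readwrite_static("count", &Cat::count);
//
// Python: `Cat.count = 3` would normally replace the static_property
// descriptor on the class; pybind11_meta_setattro routes the assignment
// through static_property.__set__ instead, so Cat::count really becomes 3
// and `Cat.count` reads it back through the C++ variable.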
+/**
+ * Python 3's PyInstanceMethod_Type hides itself via its tp_descr_get, which prevents aliasing
+ * methods via cls.attr("m2") = cls.attr("m1"): instead the tp_descr_get returns a plain function,
+ * when called on a class, or a PyMethod, when called on an instance. Override that behaviour here
+ * to do a special case bypass for PyInstanceMethod_Types.
+ */
+extern "C" inline PyObject *pybind11_meta_getattro(PyObject *obj, PyObject *name) {
+    PyObject *descr = _PyType_Lookup((PyTypeObject *) obj, name);
+    if (descr && PyInstanceMethod_Check(descr)) {
+        Py_INCREF(descr);
+        return descr;
+    }
+    return PyType_Type.tp_getattro(obj, name);
+}
+
+/// metaclass `__call__` function that is used to create all pybind11 objects.
+extern "C" inline PyObject *pybind11_meta_call(PyObject *type, PyObject *args, PyObject *kwargs) {
+
+    // use the default metaclass call to create/initialize the object
+    PyObject *self = PyType_Type.tp_call(type, args, kwargs);
+    if (self == nullptr) {
+        return nullptr;
+    }
+
+    // Ensure that the base __init__ function(s) were called
+    values_and_holders vhs(self);
+    for (const auto &vh : vhs) {
+        if (!vh.holder_constructed() && !vhs.is_redundant_value_and_holder(vh)) {
+            PyErr_Format(PyExc_TypeError,
+                         "%.200s.__init__() must be called when overriding __init__",
+                         get_fully_qualified_tp_name(vh.type->type).c_str());
+            Py_DECREF(self);
+            return nullptr;
+        }
+    }
+
+    return self;
+}
+
+/// Cleanup the type-info for a pybind11-registered type.
+extern "C" inline void pybind11_meta_dealloc(PyObject *obj) {
+    auto *type = (PyTypeObject *) obj;
+    auto &internals = get_internals();
+
+    // A pybind11-registered type will:
+    // 1) be found in internals.registered_types_py
+    // 2) have exactly one associated `detail::type_info`
+    auto found_type = internals.registered_types_py.find(type);
+    if (found_type != internals.registered_types_py.end() && found_type->second.size() == 1
+        && found_type->second[0]->type == type) {
+
+        auto *tinfo = found_type->second[0];
+        auto tindex = std::type_index(*tinfo->cpptype);
+        internals.direct_conversions.erase(tindex);
+
+        if (tinfo->module_local) {
+            get_local_internals().registered_types_cpp.erase(tindex);
+        } else {
+            internals.registered_types_cpp.erase(tindex);
+        }
+        internals.registered_types_py.erase(tinfo->type);
+
+        // Actually just `std::erase_if`, but that's only available in C++20
+        auto &cache = internals.inactive_override_cache;
+        for (auto it = cache.begin(), last = cache.end(); it != last;) {
+            if (it->first == (PyObject *) tinfo->type) {
+                it = cache.erase(it);
+            } else {
+                ++it;
+            }
+        }
+
+        delete tinfo;
+    }
+
+    PyType_Type.tp_dealloc(obj);
+}
+
+/** This metaclass is assigned by default to all pybind11 types and is required in order
+    for static properties to function correctly. Users may override this using `py::metaclass`.
+    Return value: New reference. */
+inline PyTypeObject *make_default_metaclass() {
+    constexpr auto *name = "pybind11_type";
+    auto name_obj = reinterpret_steal<object>(PYBIND11_FROM_STRING(name));
+
+    /* Danger zone: from now (and until PyType_Ready), make sure to
+       issue no Python C API calls which could potentially invoke the
+       garbage collector (the GC will call type_traverse(), which will in
+       turn find the newly constructed type in an invalid state) */
+    auto *heap_type = (PyHeapTypeObject *) PyType_Type.tp_alloc(&PyType_Type, 0);
+    if (!heap_type) {
+        pybind11_fail("make_default_metaclass(): error allocating metaclass!");
+    }
+
+    heap_type->ht_name = name_obj.inc_ref().ptr();
+#ifdef PYBIND11_BUILTIN_QUALNAME
+    heap_type->ht_qualname = name_obj.inc_ref().ptr();
+#endif
+
+    auto *type = &heap_type->ht_type;
+    type->tp_name = name;
+    type->tp_base = type_incref(&PyType_Type);
+    type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;
+
+    type->tp_call = pybind11_meta_call;
+
+    type->tp_setattro = pybind11_meta_setattro;
+    type->tp_getattro = pybind11_meta_getattro;
+
+    type->tp_dealloc = pybind11_meta_dealloc;
+
+    if (PyType_Ready(type) < 0) {
+        pybind11_fail("make_default_metaclass(): failure in PyType_Ready()!");
+    }
+
+    setattr((PyObject *) type, "__module__", str("pybind11_builtins"));
+    PYBIND11_SET_OLDPY_QUALNAME(type, name_obj);
+
+    return type;
+}
+
+/// For multiple inheritance types we need to recursively register/deregister base pointers for any
+/// base classes with pointers that are different from the instance value pointer so that we can
+/// correctly recognize an offset base class pointer. This calls a function with any offset base
+/// ptrs.
+inline void traverse_offset_bases(void *valueptr,
+                                  const detail::type_info *tinfo,
+                                  instance *self,
+                                  bool (*f)(void * /*parentptr*/, instance * /*self*/)) {
+    for (handle h : reinterpret_borrow<tuple>(tinfo->type->tp_bases)) {
+        if (auto *parent_tinfo = get_type_info((PyTypeObject *) h.ptr())) {
+            for (auto &c : parent_tinfo->implicit_casts) {
+                if (c.first == tinfo->cpptype) {
+                    auto *parentptr = c.second(valueptr);
+                    if (parentptr != valueptr) {
+                        f(parentptr, self);
+                    }
+                    traverse_offset_bases(parentptr, parent_tinfo, self, f);
+                    break;
+                }
+            }
+        }
+    }
+}
+
+inline bool register_instance_impl(void *ptr, instance *self) {
+    get_internals().registered_instances.emplace(ptr, self);
+    return true; // unused, but gives the same signature as the deregister func
+}
+inline bool deregister_instance_impl(void *ptr, instance *self) {
+    auto &registered_instances = get_internals().registered_instances;
+    auto range = registered_instances.equal_range(ptr);
+    for (auto it = range.first; it != range.second; ++it) {
+        if (self == it->second) {
+            registered_instances.erase(it);
+            return true;
+        }
+    }
+    return false;
+}
+
+inline void register_instance(instance *self, void *valptr, const type_info *tinfo) {
+    register_instance_impl(valptr, self);
+    if (!tinfo->simple_ancestors) {
+        traverse_offset_bases(valptr, tinfo, self, register_instance_impl);
+    }
+}
+
+inline bool deregister_instance(instance *self, void *valptr, const type_info *tinfo) {
+    bool ret = deregister_instance_impl(valptr, self);
+    if (!tinfo->simple_ancestors) {
+        traverse_offset_bases(valptr, tinfo, self, deregister_instance_impl);
+    }
+    return ret;
+}
+
+/// Instance creation function for all pybind11 types. It allocates the internal instance layout
+/// for holding C++ objects and holders. Allocation is done lazily (the first time the instance is
+/// cast to a reference or pointer), and initialization is done by an `__init__` function.
+inline PyObject *make_new_instance(PyTypeObject *type) {
+#if defined(PYPY_VERSION)
+    // PyPy gets tp_basicsize wrong (issue 2482) under multiple inheritance when the first
+    // inherited object is a plain Python type (i.e. not derived from an extension type). Fix it.
+    ssize_t instance_size = static_cast<ssize_t>(sizeof(instance));
+    if (type->tp_basicsize < instance_size) {
+        type->tp_basicsize = instance_size;
+    }
+#endif
+    PyObject *self = type->tp_alloc(type, 0);
+    auto *inst = reinterpret_cast<instance *>(self);
+    // Allocate the value/holder internals:
+    inst->allocate_layout();
+
+    return self;
+}
+
+/// Instance creation function for all pybind11 types. It only allocates space for the
+/// C++ object, but doesn't call the constructor -- an `__init__` function must do that.
+extern "C" inline PyObject *pybind11_object_new(PyTypeObject *type, PyObject *, PyObject *) {
+    return make_new_instance(type);
+}
+
+/// An `__init__` function constructs the C++ object. Users should provide at least one
+/// of these using `py::init` or directly with `.def(__init__, ...)`. Otherwise, the
+/// following default function will be used which simply throws an exception.
+extern "C" inline int pybind11_object_init(PyObject *self, PyObject *, PyObject *) {
+    PyTypeObject *type = Py_TYPE(self);
+    std::string msg = get_fully_qualified_tp_name(type) + ": No constructor defined!";
+    set_error(PyExc_TypeError, msg.c_str());
+    return -1;
+}
+
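This default `__init__` is what produces the familiar error when a binding forgets a constructor, e.g. (names illustrative):

// py::class_<Widget>(m, "Widget");   // no .def(py::init<>()) provided
//
// Python:
//   Widget()
//   TypeError: demo.Widget: No constructor defined!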
379 |
+
inline void add_patient(PyObject *nurse, PyObject *patient) {
|
380 |
+
auto &internals = get_internals();
|
381 |
+
auto *instance = reinterpret_cast<detail::instance *>(nurse);
|
382 |
+
instance->has_patients = true;
|
383 |
+
Py_INCREF(patient);
|
384 |
+
internals.patients[nurse].push_back(patient);
|
385 |
+
}
|
386 |
+
|
387 |
+
inline void clear_patients(PyObject *self) {
|
388 |
+
auto *instance = reinterpret_cast<detail::instance *>(self);
|
389 |
+
auto &internals = get_internals();
|
390 |
+
auto pos = internals.patients.find(self);
|
391 |
+
assert(pos != internals.patients.end());
|
392 |
+
// Clearing the patients can cause more Python code to run, which
|
393 |
+
// can invalidate the iterator. Extract the vector of patients
|
394 |
+
// from the unordered_map first.
|
395 |
+
auto patients = std::move(pos->second);
|
396 |
+
internals.patients.erase(pos);
|
397 |
+
instance->has_patients = false;
|
398 |
+
for (PyObject *&patient : patients) {
|
399 |
+
Py_CLEAR(patient);
|
400 |
+
}
|
401 |
+
}
|
402 |
+
|
403 |
+
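// Illustration: the nurse/patient machinery above is what the py::keep_alive
// call policy drives from user code. Type names below (Container, Item) are
// hypothetical, chosen only for the sketch.
#if 0 // example only, not compiled as part of this header
#include <pybind11/pybind11.h>
namespace py = pybind11;

struct Item {};
struct Container {
    void add(Item *item) { (void) item; /* stores the raw pointer */ }
};

PYBIND11_MODULE(example_keepalive, m) {
    py::class_<Item>(m, "Item").def(py::init<>());
    py::class_<Container>(m, "Container")
        .def(py::init<>())
        // keep_alive<1, 2>: call argument 2 (the Item) becomes a patient of
        // argument 1 (self), i.e. add_patient(self, item) pins the Item's
        // refcount until clear_patients() runs when the Container is destroyed.
        .def("add", &Container::add, py::keep_alive<1, 2>());
}
#endif
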
/// Clears all internal data from the instance and removes it from registered instances in
/// preparation for deallocation.
inline void clear_instance(PyObject *self) {
    auto *instance = reinterpret_cast<detail::instance *>(self);

    // Deallocate any values/holders, if present:
    for (auto &v_h : values_and_holders(instance)) {
        if (v_h) {

            // We have to deregister before we call dealloc because, for virtual MI types, we still
            // need to be able to get the parent pointers.
            if (v_h.instance_registered()
                && !deregister_instance(instance, v_h.value_ptr(), v_h.type)) {
                pybind11_fail(
                    "pybind11_object_dealloc(): Tried to deallocate unregistered instance!");
            }

            if (instance->owned || v_h.holder_constructed()) {
                v_h.type->dealloc(v_h);
            }
        }
    }
    // Deallocate the value/holder layout internals:
    instance->deallocate_layout();

    if (instance->weakrefs) {
        PyObject_ClearWeakRefs(self);
    }

    PyObject **dict_ptr = _PyObject_GetDictPtr(self);
    if (dict_ptr) {
        Py_CLEAR(*dict_ptr);
    }

    if (instance->has_patients) {
        clear_patients(self);
    }
}

/// Instance destructor function for all pybind11 types. It calls `type_info.dealloc`
/// to destroy the C++ object itself, while the rest is Python bookkeeping.
extern "C" inline void pybind11_object_dealloc(PyObject *self) {
    auto *type = Py_TYPE(self);

    // If this is a GC tracked object, untrack it first
    // Note that the track call is implicitly done by the
    // default tp_alloc, which we never override.
    if (PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC) != 0) {
        PyObject_GC_UnTrack(self);
    }

    clear_instance(self);

    type->tp_free(self);

#if PY_VERSION_HEX < 0x03080000
    // `type->tp_dealloc != pybind11_object_dealloc` means that we're being called
    // as part of a derived type's dealloc, in which case we're not allowed to decref
    // the type here. For cross-module compatibility, we shouldn't compare directly
    // with `pybind11_object_dealloc`, but with the common one stashed in internals.
    auto pybind11_object_type = (PyTypeObject *) get_internals().instance_base;
    if (type->tp_dealloc == pybind11_object_type->tp_dealloc)
        Py_DECREF(type);
#else
    // This was not needed before Python 3.8 (Python issue 35810)
    // https://github.com/pybind/pybind11/issues/1946
    Py_DECREF(type);
#endif
}

std::string error_string();

/** Create the type which can be used as a common base for all classes. This is
    needed in order to satisfy Python's requirements for multiple inheritance.
    Return value: New reference. */
inline PyObject *make_object_base_type(PyTypeObject *metaclass) {
    constexpr auto *name = "pybind11_object";
    auto name_obj = reinterpret_steal<object>(PYBIND11_FROM_STRING(name));

    /* Danger zone: from now (and until PyType_Ready), make sure to
       issue no Python C API calls which could potentially invoke the
       garbage collector (the GC will call type_traverse(), which will in
       turn find the newly constructed type in an invalid state) */
    auto *heap_type = (PyHeapTypeObject *) metaclass->tp_alloc(metaclass, 0);
    if (!heap_type) {
        pybind11_fail("make_object_base_type(): error allocating type!");
    }

    heap_type->ht_name = name_obj.inc_ref().ptr();
#ifdef PYBIND11_BUILTIN_QUALNAME
    heap_type->ht_qualname = name_obj.inc_ref().ptr();
#endif

    auto *type = &heap_type->ht_type;
    type->tp_name = name;
    type->tp_base = type_incref(&PyBaseObject_Type);
    type->tp_basicsize = static_cast<ssize_t>(sizeof(instance));
    type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE;

    type->tp_new = pybind11_object_new;
    type->tp_init = pybind11_object_init;
    type->tp_dealloc = pybind11_object_dealloc;

    /* Support weak references (needed for the keep_alive feature) */
    type->tp_weaklistoffset = offsetof(instance, weakrefs);

    if (PyType_Ready(type) < 0) {
        pybind11_fail("PyType_Ready failed in make_object_base_type(): " + error_string());
    }

    setattr((PyObject *) type, "__module__", str("pybind11_builtins"));
    PYBIND11_SET_OLDPY_QUALNAME(type, name_obj);

    assert(!PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC));
    return (PyObject *) heap_type;
}

/// dynamic_attr: Allow the garbage collector to traverse the internal instance `__dict__`.
extern "C" inline int pybind11_traverse(PyObject *self, visitproc visit, void *arg) {
#if PY_VERSION_HEX >= 0x030D0000
    PyObject_VisitManagedDict(self, visit, arg);
#else
    PyObject *&dict = *_PyObject_GetDictPtr(self);
    Py_VISIT(dict);
#endif
    // https://docs.python.org/3/c-api/typeobj.html#c.PyTypeObject.tp_traverse
#if PY_VERSION_HEX >= 0x03090000
    Py_VISIT(Py_TYPE(self));
#endif
    return 0;
}

/// dynamic_attr: Allow the GC to clear the dictionary.
extern "C" inline int pybind11_clear(PyObject *self) {
#if PY_VERSION_HEX >= 0x030D0000
    PyObject_ClearManagedDict(self);
#else
    PyObject *&dict = *_PyObject_GetDictPtr(self);
    Py_CLEAR(dict);
#endif
    return 0;
}

/// Give instances of this type a `__dict__` and opt into garbage collection.
inline void enable_dynamic_attributes(PyHeapTypeObject *heap_type) {
    auto *type = &heap_type->ht_type;
    type->tp_flags |= Py_TPFLAGS_HAVE_GC;
#if PY_VERSION_HEX < 0x030B0000
    type->tp_dictoffset = type->tp_basicsize;           // place dict at the end
    type->tp_basicsize += (ssize_t) sizeof(PyObject *); // and allocate enough space for it
#else
    type->tp_flags |= Py_TPFLAGS_MANAGED_DICT;
#endif
    type->tp_traverse = pybind11_traverse;
    type->tp_clear = pybind11_clear;

    static PyGetSetDef getset[] = {{
#if PY_VERSION_HEX < 0x03070000
                                       const_cast<char *>("__dict__"),
#else
                                       "__dict__",
#endif
                                       PyObject_GenericGetDict,
                                       PyObject_GenericSetDict,
                                       nullptr,
                                       nullptr},
                                   {nullptr, nullptr, nullptr, nullptr, nullptr}};
    type->tp_getset = getset;
}

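// Illustration: enable_dynamic_attributes() above is reached when a binding
// opts in with py::dynamic_attr(); the Pet name below is hypothetical.
#if 0 // example only, not compiled as part of this header
#include <pybind11/pybind11.h>
namespace py = pybind11;

struct Pet {};

PYBIND11_MODULE(example_dynattr, m) {
    // dynamic_attr() gives instances a __dict__ and installs the
    // pybind11_traverse/pybind11_clear hooks so the cycle GC can reach it.
    py::class_<Pet>(m, "Pet", py::dynamic_attr()).def(py::init<>());
    // Python side:  p = Pet(); p.nickname = "rex"   # now allowed
}
#endif
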
/// buffer_protocol: Fill in the view as specified by flags.
extern "C" inline int pybind11_getbuffer(PyObject *obj, Py_buffer *view, int flags) {
    // Look for a `get_buffer` implementation in this type's info or any bases (following MRO).
    type_info *tinfo = nullptr;
    for (auto type : reinterpret_borrow<tuple>(Py_TYPE(obj)->tp_mro)) {
        tinfo = get_type_info((PyTypeObject *) type.ptr());
        if (tinfo && tinfo->get_buffer) {
            break;
        }
    }
    if (view == nullptr || !tinfo || !tinfo->get_buffer) {
        if (view) {
            view->obj = nullptr;
        }
        set_error(PyExc_BufferError, "pybind11_getbuffer(): Internal error");
        return -1;
    }
    std::memset(view, 0, sizeof(Py_buffer));
    buffer_info *info = tinfo->get_buffer(obj, tinfo->get_buffer_data);
    if ((flags & PyBUF_WRITABLE) == PyBUF_WRITABLE && info->readonly) {
        delete info;
        // view->obj = nullptr;  // Was just memset to 0, so not necessary
        set_error(PyExc_BufferError, "Writable buffer requested for readonly storage");
        return -1;
    }
    view->obj = obj;
    view->ndim = 1;
    view->internal = info;
    view->buf = info->ptr;
    view->itemsize = info->itemsize;
    view->len = view->itemsize;
    for (auto s : info->shape) {
        view->len *= s;
    }
    view->readonly = static_cast<int>(info->readonly);
    if ((flags & PyBUF_FORMAT) == PyBUF_FORMAT) {
        view->format = const_cast<char *>(info->format.c_str());
    }
    if ((flags & PyBUF_STRIDES) == PyBUF_STRIDES) {
        view->ndim = (int) info->ndim;
        view->strides = info->strides.data();
        view->shape = info->shape.data();
    }
    Py_INCREF(view->obj);
    return 0;
}

/// buffer_protocol: Release the resources of the buffer.
extern "C" inline void pybind11_releasebuffer(PyObject *, Py_buffer *view) {
    delete (buffer_info *) view->internal;
}

/// Give this type a buffer interface.
inline void enable_buffer_protocol(PyHeapTypeObject *heap_type) {
    heap_type->ht_type.tp_as_buffer = &heap_type->as_buffer;

    heap_type->as_buffer.bf_getbuffer = pybind11_getbuffer;
    heap_type->as_buffer.bf_releasebuffer = pybind11_releasebuffer;
}

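// Illustration: tinfo->get_buffer (consulted by pybind11_getbuffer above) is
// installed when a binding combines py::buffer_protocol() with .def_buffer().
// The Matrix type is hypothetical, chosen only for the sketch.
#if 0 // example only, not compiled as part of this header
#include <pybind11/pybind11.h>
namespace py = pybind11;

struct Matrix {
    float data[16]; // 4x4, row-major
};

PYBIND11_MODULE(example_buffer, m) {
    py::class_<Matrix>(m, "Matrix", py::buffer_protocol())
        .def(py::init<>())
        .def_buffer([](Matrix &mat) -> py::buffer_info {
            return py::buffer_info(
                mat.data,                               // pointer to the storage
                sizeof(float),                          // itemsize
                py::format_descriptor<float>::format(), // format string ("f")
                2,                                      // number of dimensions
                {4, 4},                                 // shape
                {4 * sizeof(float), sizeof(float)});    // strides, in bytes
        });
    // Python side:  memoryview(Matrix())  or  numpy.asarray(Matrix())
}
#endif
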
/** Create a brand new Python type according to the `type_record` specification.
    Return value: New reference. */
inline PyObject *make_new_python_type(const type_record &rec) {
    auto name = reinterpret_steal<object>(PYBIND11_FROM_STRING(rec.name));

    auto qualname = name;
    if (rec.scope && !PyModule_Check(rec.scope.ptr()) && hasattr(rec.scope, "__qualname__")) {
        qualname = reinterpret_steal<object>(
            PyUnicode_FromFormat("%U.%U", rec.scope.attr("__qualname__").ptr(), name.ptr()));
    }

    object module_;
    if (rec.scope) {
        if (hasattr(rec.scope, "__module__")) {
            module_ = rec.scope.attr("__module__");
        } else if (hasattr(rec.scope, "__name__")) {
            module_ = rec.scope.attr("__name__");
        }
    }

    const auto *full_name = c_str(
#if !defined(PYPY_VERSION)
        module_ ? str(module_).cast<std::string>() + "." + rec.name :
#endif
                rec.name);

    char *tp_doc = nullptr;
    if (rec.doc && options::show_user_defined_docstrings()) {
        /* Allocate memory for docstring (using PyObject_MALLOC, since
           Python will free this later on) */
        size_t size = std::strlen(rec.doc) + 1;
        tp_doc = (char *) PyObject_MALLOC(size);
        std::memcpy((void *) tp_doc, rec.doc, size);
    }

    auto &internals = get_internals();
    auto bases = tuple(rec.bases);
    auto *base = (bases.empty()) ? internals.instance_base : bases[0].ptr();

    /* Danger zone: from now (and until PyType_Ready), make sure to
       issue no Python C API calls which could potentially invoke the
       garbage collector (the GC will call type_traverse(), which will in
       turn find the newly constructed type in an invalid state) */
    auto *metaclass
        = rec.metaclass.ptr() ? (PyTypeObject *) rec.metaclass.ptr() : internals.default_metaclass;

    auto *heap_type = (PyHeapTypeObject *) metaclass->tp_alloc(metaclass, 0);
    if (!heap_type) {
        pybind11_fail(std::string(rec.name) + ": Unable to create type object!");
    }

    heap_type->ht_name = name.release().ptr();
#ifdef PYBIND11_BUILTIN_QUALNAME
    heap_type->ht_qualname = qualname.inc_ref().ptr();
#endif

    auto *type = &heap_type->ht_type;
    type->tp_name = full_name;
    type->tp_doc = tp_doc;
    type->tp_base = type_incref((PyTypeObject *) base);
    type->tp_basicsize = static_cast<ssize_t>(sizeof(instance));
    if (!bases.empty()) {
        type->tp_bases = bases.release().ptr();
    }

    /* Don't inherit base __init__ */
    type->tp_init = pybind11_object_init;

    /* Supported protocols */
    type->tp_as_number = &heap_type->as_number;
    type->tp_as_sequence = &heap_type->as_sequence;
    type->tp_as_mapping = &heap_type->as_mapping;
    type->tp_as_async = &heap_type->as_async;

    /* Flags */
    type->tp_flags |= Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HEAPTYPE;
    if (!rec.is_final) {
        type->tp_flags |= Py_TPFLAGS_BASETYPE;
    }

    if (rec.dynamic_attr) {
        enable_dynamic_attributes(heap_type);
    }

    if (rec.buffer_protocol) {
        enable_buffer_protocol(heap_type);
    }

    if (rec.custom_type_setup_callback) {
        rec.custom_type_setup_callback(heap_type);
    }

    if (PyType_Ready(type) < 0) {
        pybind11_fail(std::string(rec.name) + ": PyType_Ready failed: " + error_string());
    }

    assert(!rec.dynamic_attr || PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC));

    /* Register type with the parent scope */
    if (rec.scope) {
        setattr(rec.scope, rec.name, (PyObject *) type);
    } else {
        Py_INCREF(type); // Keep it alive forever (reference leak)
    }

    if (module_) { // Needed by pydoc
        setattr((PyObject *) type, "__module__", module_);
    }

    PYBIND11_SET_OLDPY_QUALNAME(type, qualname);

    return (PyObject *) type;
}

PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
venv/lib/python3.10/site-packages/torch/include/pybind11/detail/common.h
ADDED
@@ -0,0 +1,1267 @@
/*
    pybind11/detail/common.h -- Basic macros

    Copyright (c) 2016 Wenzel Jakob <[email protected]>

    All rights reserved. Use of this source code is governed by a
    BSD-style license that can be found in the LICENSE file.
*/

#pragma once

#define PYBIND11_VERSION_MAJOR 2
#define PYBIND11_VERSION_MINOR 12
#define PYBIND11_VERSION_PATCH 0

// Similar to Python's convention: https://docs.python.org/3/c-api/apiabiversion.html
// Additional convention: 0xD = dev
#define PYBIND11_VERSION_HEX 0x020C0000
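// For example, 0x020C0000 decodes per that convention as major 0x02 = 2,
// minor 0x0C = 12, patch 0x00 = 0 (release level/serial 0), i.e. 2.12.0.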

// Define some generic pybind11 helper macros for warning management.
//
// Note that compiler-specific push/pop pairs are baked into the
// PYBIND11_NAMESPACE_BEGIN/PYBIND11_NAMESPACE_END pair of macros. Therefore manual
// PYBIND11_WARNING_PUSH/PYBIND11_WARNING_POP are usually only needed in `#include` sections.
//
// If you find you need to suppress a warning, please try to make the suppression as local as
// possible using these macros. Please also be sure to push/pop with the pybind11 macros. Please
// only use compiler specifics if you need to check specific versions, e.g. Apple Clang vs. vanilla
// Clang.
#if defined(_MSC_VER)
#    define PYBIND11_COMPILER_MSVC
#    define PYBIND11_PRAGMA(...) __pragma(__VA_ARGS__)
#    define PYBIND11_WARNING_PUSH PYBIND11_PRAGMA(warning(push))
#    define PYBIND11_WARNING_POP PYBIND11_PRAGMA(warning(pop))
#elif defined(__INTEL_COMPILER)
#    define PYBIND11_COMPILER_INTEL
#    define PYBIND11_PRAGMA(...) _Pragma(#__VA_ARGS__)
#    define PYBIND11_WARNING_PUSH PYBIND11_PRAGMA(warning push)
#    define PYBIND11_WARNING_POP PYBIND11_PRAGMA(warning pop)
#elif defined(__clang__)
#    define PYBIND11_COMPILER_CLANG
#    define PYBIND11_PRAGMA(...) _Pragma(#__VA_ARGS__)
#    define PYBIND11_WARNING_PUSH PYBIND11_PRAGMA(clang diagnostic push)
#    define PYBIND11_WARNING_POP PYBIND11_PRAGMA(clang diagnostic pop)
#elif defined(__GNUC__)
#    define PYBIND11_COMPILER_GCC
#    define PYBIND11_PRAGMA(...) _Pragma(#__VA_ARGS__)
#    define PYBIND11_WARNING_PUSH PYBIND11_PRAGMA(GCC diagnostic push)
#    define PYBIND11_WARNING_POP PYBIND11_PRAGMA(GCC diagnostic pop)
#endif

#ifdef PYBIND11_COMPILER_MSVC
#    define PYBIND11_WARNING_DISABLE_MSVC(name) PYBIND11_PRAGMA(warning(disable : name))
#else
#    define PYBIND11_WARNING_DISABLE_MSVC(name)
#endif

#ifdef PYBIND11_COMPILER_CLANG
#    define PYBIND11_WARNING_DISABLE_CLANG(name) PYBIND11_PRAGMA(clang diagnostic ignored name)
#else
#    define PYBIND11_WARNING_DISABLE_CLANG(name)
#endif

#ifdef PYBIND11_COMPILER_GCC
#    define PYBIND11_WARNING_DISABLE_GCC(name) PYBIND11_PRAGMA(GCC diagnostic ignored name)
#else
#    define PYBIND11_WARNING_DISABLE_GCC(name)
#endif

#ifdef PYBIND11_COMPILER_INTEL
#    define PYBIND11_WARNING_DISABLE_INTEL(name) PYBIND11_PRAGMA(warning disable name)
#else
#    define PYBIND11_WARNING_DISABLE_INTEL(name)
#endif

#define PYBIND11_NAMESPACE_BEGIN(name) \
    namespace name {                   \
    PYBIND11_WARNING_PUSH

#define PYBIND11_NAMESPACE_END(name) \
    PYBIND11_WARNING_POP             \
    }

// Robust support for some features and loading modules compiled against different pybind versions
// requires forcing hidden visibility on pybind code, so we enforce this by setting the attribute
// on the main `pybind11` namespace.
#if !defined(PYBIND11_NAMESPACE)
#    ifdef __GNUG__
#        define PYBIND11_NAMESPACE pybind11 __attribute__((visibility("hidden")))
#    else
#        define PYBIND11_NAMESPACE pybind11
#    endif
#endif

#if !(defined(_MSC_VER) && __cplusplus == 199711L)
#    if __cplusplus >= 201402L
#        define PYBIND11_CPP14
#        if __cplusplus >= 201703L
#            define PYBIND11_CPP17
#            if __cplusplus >= 202002L
#                define PYBIND11_CPP20
// Please update tests/pybind11_tests.cpp `cpp_std()` when adding a macro here.
#            endif
#        endif
#    endif
#elif defined(_MSC_VER) && __cplusplus == 199711L
// MSVC sets _MSVC_LANG rather than __cplusplus (supposedly until the standard is fully
// implemented). Unless you use the /Zc:__cplusplus flag on Visual Studio 2017 15.7 Preview 3
// or newer.
#    if _MSVC_LANG >= 201402L
#        define PYBIND11_CPP14
#        if _MSVC_LANG > 201402L
#            define PYBIND11_CPP17
#            if _MSVC_LANG >= 202002L
#                define PYBIND11_CPP20
#            endif
#        endif
#    endif
#endif

#if defined(PYBIND11_CPP20)
#    define PYBIND11_CONSTINIT constinit
#    define PYBIND11_DTOR_CONSTEXPR constexpr
#else
#    define PYBIND11_CONSTINIT
#    define PYBIND11_DTOR_CONSTEXPR
#endif

// Compiler version assertions
#if defined(__INTEL_COMPILER)
#    if __INTEL_COMPILER < 1800
#        error pybind11 requires Intel C++ compiler v18 or newer
#    elif __INTEL_COMPILER < 1900 && defined(PYBIND11_CPP14)
#        error pybind11 supports only C++11 with Intel C++ compiler v18. Use v19 or newer for C++14.
#    endif
/* The following pragma cannot be pop'ed:
   https://community.intel.com/t5/Intel-C-Compiler/Inline-and-no-inline-warning/td-p/1216764 */
#    pragma warning disable 2196 // warning #2196: routine is both "inline" and "noinline"
#elif defined(__clang__) && !defined(__apple_build_version__)
#    if __clang_major__ < 3 || (__clang_major__ == 3 && __clang_minor__ < 3)
#        error pybind11 requires clang 3.3 or newer
#    endif
#elif defined(__clang__)
// Apple changes clang version macros to its Xcode version; the first Xcode release based on
// (upstream) clang 3.3 was Xcode 5:
#    if __clang_major__ < 5
#        error pybind11 requires Xcode/clang 5.0 or newer
#    endif
#elif defined(__GNUG__)
#    if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 8)
#        error pybind11 requires gcc 4.8 or newer
#    endif
#elif defined(_MSC_VER)
#    if _MSC_VER < 1910
#        error pybind11 2.10+ requires MSVC 2017 or newer
#    endif
#endif

#if !defined(PYBIND11_EXPORT)
#    if defined(WIN32) || defined(_WIN32)
#        define PYBIND11_EXPORT __declspec(dllexport)
#    else
#        define PYBIND11_EXPORT __attribute__((visibility("default")))
#    endif
#endif

#if !defined(PYBIND11_EXPORT_EXCEPTION)
#    if defined(__apple_build_version__)
#        define PYBIND11_EXPORT_EXCEPTION PYBIND11_EXPORT
#    else
#        define PYBIND11_EXPORT_EXCEPTION
#    endif
#endif

// For CUDA, GCC7, GCC8:
// PYBIND11_NOINLINE_FORCED is incompatible with `-Wattributes -Werror`.
// When defining PYBIND11_NOINLINE_FORCED, it is best to also use `-Wno-attributes`.
// However, the measured shared-library size saving when using noinline are only
// 1.7% for CUDA, -0.2% for GCC7, and 0.0% for GCC8 (using -DCMAKE_BUILD_TYPE=MinSizeRel,
// the default under pybind11/tests).
#if !defined(PYBIND11_NOINLINE_FORCED) \
    && (defined(__CUDACC__) || (defined(__GNUC__) && (__GNUC__ == 7 || __GNUC__ == 8)))
#    define PYBIND11_NOINLINE_DISABLED
#endif

// The PYBIND11_NOINLINE macro is for function DEFINITIONS.
// In contrast, FORWARD DECLARATIONS should never use this macro:
// https://stackoverflow.com/questions/9317473/forward-declaration-of-inline-functions
#if defined(PYBIND11_NOINLINE_DISABLED) // Option for maximum portability and experimentation.
#    define PYBIND11_NOINLINE inline
#elif defined(_MSC_VER)
#    define PYBIND11_NOINLINE __declspec(noinline) inline
#else
#    define PYBIND11_NOINLINE __attribute__((noinline)) inline
#endif

#if defined(__MINGW32__)
// For unknown reasons all PYBIND11_DEPRECATED members trigger a warning when declared
// whether they are used or not
#    define PYBIND11_DEPRECATED(reason)
#elif defined(PYBIND11_CPP14)
#    define PYBIND11_DEPRECATED(reason) [[deprecated(reason)]]
#else
#    define PYBIND11_DEPRECATED(reason) __attribute__((deprecated(reason)))
#endif

#if defined(PYBIND11_CPP17)
#    define PYBIND11_MAYBE_UNUSED [[maybe_unused]]
#elif defined(_MSC_VER) && !defined(__clang__)
#    define PYBIND11_MAYBE_UNUSED
#else
#    define PYBIND11_MAYBE_UNUSED __attribute__((__unused__))
#endif

/* Don't let Python.h #define (v)snprintf as macro because they are implemented
   properly in Visual Studio since 2015. */
#if defined(_MSC_VER)
#    define HAVE_SNPRINTF 1
#endif

/// Include Python header, disable linking to pythonX_d.lib on Windows in debug mode
#if defined(_MSC_VER)
PYBIND11_WARNING_PUSH
PYBIND11_WARNING_DISABLE_MSVC(4505)
// C4505: 'PySlice_GetIndicesEx': unreferenced local function has been removed (PyPy only)
#    if defined(_DEBUG) && !defined(Py_DEBUG)
// Workaround for a VS 2022 issue.
// NOTE: This workaround knowingly violates the Python.h include order requirement:
// https://docs.python.org/3/c-api/intro.html#include-files
// See https://github.com/pybind/pybind11/pull/3497 for full context.
#        include <yvals.h>
#        if _MSVC_STL_VERSION >= 143
#            include <crtdefs.h>
#        endif
#        define PYBIND11_DEBUG_MARKER
#        undef _DEBUG
#    endif
#endif

// https://en.cppreference.com/w/c/chrono/localtime
#if defined(__STDC_LIB_EXT1__) && !defined(__STDC_WANT_LIB_EXT1__)
#    define __STDC_WANT_LIB_EXT1__
#endif

#ifdef __has_include
// std::optional (but including it in c++14 mode isn't allowed)
#    if defined(PYBIND11_CPP17) && __has_include(<optional>)
#        define PYBIND11_HAS_OPTIONAL 1
#    endif
// std::experimental::optional (but not allowed in c++11 mode)
#    if defined(PYBIND11_CPP14) && (__has_include(<experimental/optional>) && \
         !__has_include(<optional>))
#        define PYBIND11_HAS_EXP_OPTIONAL 1
#    endif
// std::variant
#    if defined(PYBIND11_CPP17) && __has_include(<variant>)
#        define PYBIND11_HAS_VARIANT 1
#    endif
#elif defined(_MSC_VER) && defined(PYBIND11_CPP17)
#    define PYBIND11_HAS_OPTIONAL 1
#    define PYBIND11_HAS_VARIANT 1
#endif

#if defined(PYBIND11_CPP17)
#    if defined(__has_include)
#        if __has_include(<string_view>)
#            define PYBIND11_HAS_STRING_VIEW
#        endif
#    elif defined(_MSC_VER)
#        define PYBIND11_HAS_STRING_VIEW
#    endif
#endif

#include <Python.h>
// Reminder: WITH_THREAD is always defined if PY_VERSION_HEX >= 0x03070000
#if PY_VERSION_HEX < 0x03060000
#    error "PYTHON < 3.6 IS UNSUPPORTED. pybind11 v2.9 was the last to support Python 2 and 3.5."
#endif
#include <frameobject.h>
#include <pythread.h>

/* Python #defines overrides on all sorts of core functions, which
   tends to wreak havoc in C++ codebases that expect these to work
   like regular functions (potentially with several overloads) */
#if defined(isalnum)
#    undef isalnum
#    undef isalpha
#    undef islower
#    undef isspace
#    undef isupper
#    undef tolower
#    undef toupper
#endif

#if defined(copysign)
#    undef copysign
#endif

#if defined(PYBIND11_NUMPY_1_ONLY)
#    define PYBIND11_INTERNAL_NUMPY_1_ONLY_DETECTED
#endif

#if defined(PYPY_VERSION) && !defined(PYBIND11_SIMPLE_GIL_MANAGEMENT)
#    define PYBIND11_SIMPLE_GIL_MANAGEMENT
#endif

#if defined(_MSC_VER)
#    if defined(PYBIND11_DEBUG_MARKER)
#        define _DEBUG
#        undef PYBIND11_DEBUG_MARKER
#    endif
PYBIND11_WARNING_POP
#endif

#include <cstddef>
#include <cstring>
#include <exception>
#include <forward_list>
#include <memory>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <typeindex>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#if defined(__has_include)
#    if __has_include(<version>)
#        include <version>
#    endif
#endif

// Must be after including <version> or one of the other headers specified by the standard
#if defined(__cpp_lib_char8_t) && __cpp_lib_char8_t >= 201811L
#    define PYBIND11_HAS_U8STRING
#endif

// See description of PR #4246:
#if !defined(PYBIND11_NO_ASSERT_GIL_HELD_INCREF_DECREF) && !defined(NDEBUG) \
    && !defined(PYPY_VERSION) && !defined(PYBIND11_ASSERT_GIL_HELD_INCREF_DECREF)
#    define PYBIND11_ASSERT_GIL_HELD_INCREF_DECREF
#endif

// #define PYBIND11_STR_LEGACY_PERMISSIVE
// If DEFINED, pybind11::str can hold PyUnicodeObject or PyBytesObject
//             (probably surprising and never documented, but this was the
//             legacy behavior until and including v2.6.x). As a side-effect,
//             pybind11::isinstance<str>() is true for both pybind11::str and
//             pybind11::bytes.
// If UNDEFINED, pybind11::str can only hold PyUnicodeObject, and
//               pybind11::isinstance<str>() is true only for pybind11::str.
//               However, for Python 2 only (!), the pybind11::str caster
//               implicitly decoded bytes to PyUnicodeObject. This was to ease
//               the transition from the legacy behavior to the non-permissive
//               behavior.

/// Compatibility macros for Python 2 / Python 3 versions TODO: remove
#define PYBIND11_INSTANCE_METHOD_NEW(ptr, class_) PyInstanceMethod_New(ptr)
#define PYBIND11_INSTANCE_METHOD_CHECK PyInstanceMethod_Check
#define PYBIND11_INSTANCE_METHOD_GET_FUNCTION PyInstanceMethod_GET_FUNCTION
#define PYBIND11_BYTES_CHECK PyBytes_Check
#define PYBIND11_BYTES_FROM_STRING PyBytes_FromString
#define PYBIND11_BYTES_FROM_STRING_AND_SIZE PyBytes_FromStringAndSize
#define PYBIND11_BYTES_AS_STRING_AND_SIZE PyBytes_AsStringAndSize
#define PYBIND11_BYTES_AS_STRING PyBytes_AsString
#define PYBIND11_BYTES_SIZE PyBytes_Size
#define PYBIND11_LONG_CHECK(o) PyLong_Check(o)
#define PYBIND11_LONG_AS_LONGLONG(o) PyLong_AsLongLong(o)
#define PYBIND11_LONG_FROM_SIGNED(o) PyLong_FromSsize_t((ssize_t) (o))
#define PYBIND11_LONG_FROM_UNSIGNED(o) PyLong_FromSize_t((size_t) (o))
#define PYBIND11_BYTES_NAME "bytes"
#define PYBIND11_STRING_NAME "str"
#define PYBIND11_SLICE_OBJECT PyObject
#define PYBIND11_FROM_STRING PyUnicode_FromString
#define PYBIND11_STR_TYPE ::pybind11::str
#define PYBIND11_BOOL_ATTR "__bool__"
#define PYBIND11_NB_BOOL(ptr) ((ptr)->nb_bool)
#define PYBIND11_BUILTINS_MODULE "builtins"
// Providing a separate declaration to make Clang's -Wmissing-prototypes happy.
// See comment for PYBIND11_MODULE below for why this is marked "maybe unused".
#define PYBIND11_PLUGIN_IMPL(name)                                            \
    extern "C" PYBIND11_MAYBE_UNUSED PYBIND11_EXPORT PyObject *PyInit_##name(); \
    extern "C" PYBIND11_EXPORT PyObject *PyInit_##name()

#define PYBIND11_TRY_NEXT_OVERLOAD ((PyObject *) 1) // special failure return code
#define PYBIND11_STRINGIFY(x) #x
#define PYBIND11_TOSTRING(x) PYBIND11_STRINGIFY(x)
#define PYBIND11_CONCAT(first, second) first##second
#define PYBIND11_ENSURE_INTERNALS_READY pybind11::detail::get_internals();

#define PYBIND11_CHECK_PYTHON_VERSION                                             \
    {                                                                             \
        const char *compiled_ver                                                  \
            = PYBIND11_TOSTRING(PY_MAJOR_VERSION) "." PYBIND11_TOSTRING(PY_MINOR_VERSION); \
        const char *runtime_ver = Py_GetVersion();                                \
        size_t len = std::strlen(compiled_ver);                                   \
        if (std::strncmp(runtime_ver, compiled_ver, len) != 0                     \
            || (runtime_ver[len] >= '0' && runtime_ver[len] <= '9')) {            \
            PyErr_Format(PyExc_ImportError,                                       \
                         "Python version mismatch: module was compiled for Python %s, " \
                         "but the interpreter version is incompatible: %s.",      \
                         compiled_ver,                                            \
                         runtime_ver);                                            \
            return nullptr;                                                       \
        }                                                                         \
    }

#define PYBIND11_CATCH_INIT_EXCEPTIONS                                       \
    catch (pybind11::error_already_set & e) {                                \
        pybind11::raise_from(e, PyExc_ImportError, "initialization failed"); \
        return nullptr;                                                      \
    }                                                                        \
    catch (const std::exception &e) {                                        \
        ::pybind11::set_error(PyExc_ImportError, e.what());                  \
        return nullptr;                                                      \
    }

/** \rst
    ***Deprecated in favor of PYBIND11_MODULE***

    This macro creates the entry point that will be invoked when the Python interpreter
    imports a plugin library. Please create a `module_` in the function body and return
    the pointer to its underlying Python object at the end.

    .. code-block:: cpp

        PYBIND11_PLUGIN(example) {
            pybind11::module_ m("example", "pybind11 example plugin");
            /// Set up bindings here
            return m.ptr();
        }
\endrst */
#define PYBIND11_PLUGIN(name)                                              \
    PYBIND11_DEPRECATED("PYBIND11_PLUGIN is deprecated, use PYBIND11_MODULE") \
    static PyObject *pybind11_init();                                      \
    PYBIND11_PLUGIN_IMPL(name) {                                           \
        PYBIND11_CHECK_PYTHON_VERSION                                      \
        PYBIND11_ENSURE_INTERNALS_READY                                    \
        try {                                                              \
            return pybind11_init();                                        \
        }                                                                  \
        PYBIND11_CATCH_INIT_EXCEPTIONS                                     \
    }                                                                      \
    PyObject *pybind11_init()

/** \rst
    This macro creates the entry point that will be invoked when the Python interpreter
    imports an extension module. The module name is given as the first argument and it
    should not be in quotes. The second macro argument defines a variable of type
    `py::module_` which can be used to initialize the module.

    The entry point is marked as "maybe unused" to aid dead-code detection analysis:
    since the entry point is typically only looked up at runtime and not referenced
    during translation, it would otherwise appear as unused ("dead") code.

    .. code-block:: cpp

        PYBIND11_MODULE(example, m) {
            m.doc() = "pybind11 example module";

            // Add bindings here
            m.def("foo", []() {
                return "Hello, World!";
            });
        }
\endrst */
#define PYBIND11_MODULE(name, variable)                                              \
    static ::pybind11::module_::module_def PYBIND11_CONCAT(pybind11_module_def_, name) \
        PYBIND11_MAYBE_UNUSED;                                                       \
    PYBIND11_MAYBE_UNUSED                                                            \
    static void PYBIND11_CONCAT(pybind11_init_, name)(::pybind11::module_ &);        \
    PYBIND11_PLUGIN_IMPL(name) {                                                     \
        PYBIND11_CHECK_PYTHON_VERSION                                                \
        PYBIND11_ENSURE_INTERNALS_READY                                              \
        auto m = ::pybind11::module_::create_extension_module(                       \
            PYBIND11_TOSTRING(name), nullptr, &PYBIND11_CONCAT(pybind11_module_def_, name)); \
        try {                                                                        \
            PYBIND11_CONCAT(pybind11_init_, name)(m);                                \
            return m.ptr();                                                          \
        }                                                                            \
        PYBIND11_CATCH_INIT_EXCEPTIONS                                               \
    }                                                                                \
    void PYBIND11_CONCAT(pybind11_init_, name)(::pybind11::module_ & (variable))

PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)

using ssize_t = Py_ssize_t;
using size_t = std::size_t;

template <typename IntType>
inline ssize_t ssize_t_cast(const IntType &val) {
    static_assert(sizeof(IntType) <= sizeof(ssize_t), "Implicit narrowing is not permitted.");
    return static_cast<ssize_t>(val);
}

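// Illustration: ssize_t_cast() above rejects conversions that could narrow at
// compile time instead of at runtime. The widths quoted in the comments are
// assumptions about a typical 64-bit build.
#if 0 // example only, not compiled as part of this header
#include <cstdint>

void ssize_t_cast_examples() {
    std::size_t n = 42;
    auto a = pybind11::ssize_t_cast(n); // OK: sizeof(size_t) <= sizeof(ssize_t)
    auto b = pybind11::ssize_t_cast(7); // OK: int is no wider than ssize_t
    // Where ssize_t is 4 bytes (32-bit Python), the following would fail the
    // static_assert, because int64_t is wider:
    // auto c = pybind11::ssize_t_cast(std::int64_t{1});
    (void) a;
    (void) b;
}
#endif
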
/// Approach used to cast a previously unknown C++ instance into a Python object
enum class return_value_policy : uint8_t {
    /** This is the default return value policy, which falls back to the policy
        return_value_policy::take_ownership when the return value is a pointer.
        Otherwise, it uses return_value_policy::move or return_value_policy::copy
        for rvalue and lvalue references, respectively. See below for a description
        of what all of these different policies do. */
    automatic = 0,

    /** As above, but use policy return_value_policy::reference when the return
        value is a pointer. This is the default conversion policy for function
        arguments when calling Python functions manually from C++ code (i.e. via
        handle::operator()). You probably won't need to use this. */
    automatic_reference,

    /** Reference an existing object (i.e. do not create a new copy) and take
        ownership. Python will call the destructor and delete operator when the
        object's reference count reaches zero. Undefined behavior ensues when
        the C++ side does the same. */
    take_ownership,

    /** Create a new copy of the returned object, which will be owned by
        Python. This policy is comparably safe because the lifetimes of the two
        instances are decoupled. */
    copy,

    /** Use std::move to move the return value contents into a new instance
        that will be owned by Python. This policy is comparably safe because the
        lifetimes of the two instances (move source and destination) are
        decoupled. */
    move,

    /** Reference an existing object, but do not take ownership. The C++ side
        is responsible for managing the object's lifetime and deallocating it
        when it is no longer used. Warning: undefined behavior will ensue when
        the C++ side deletes an object that is still referenced and used by
        Python. */
    reference,

    /** This policy only applies to methods and properties. It references the
        object without taking ownership similar to the above
        return_value_policy::reference policy. In contrast to that policy, the
        function or property's implicit this argument (called the parent) is
        considered to be the owner of the return value (the child).
        pybind11 then couples the lifetime of the parent to the child via a
        reference relationship that ensures that the parent cannot be garbage
        collected while Python is still using the child. More advanced
        variations of this scheme are also possible using combinations of
        return_value_policy::reference and the keep_alive call policy */
    reference_internal
};

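// Illustration: choosing one of the policies above at binding time. Widget and
// its parent() accessor are hypothetical names for the sketch.
#if 0 // example only, not compiled as part of this header
#include <pybind11/pybind11.h>
namespace py = pybind11;

struct Widget {
    Widget *parent() { return parent_; } // non-owning pointer
    Widget *parent_ = nullptr;
};

PYBIND11_MODULE(example_rvp, m) {
    py::class_<Widget>(m, "Widget")
        .def(py::init<>())
        // reference_internal: Python does not take ownership of the returned
        // parent, and the Widget it was obtained from (the implicit self) is
        // kept alive for as long as Python uses the returned object.
        .def("parent", &Widget::parent, py::return_value_policy::reference_internal);
}
#endif
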
PYBIND11_NAMESPACE_BEGIN(detail)

inline static constexpr int log2(size_t n, int k = 0) {
    return (n <= 1) ? k : log2(n >> 1, k + 1);
}

// Returns the size as a multiple of sizeof(void *), rounded up.
inline static constexpr size_t size_in_ptrs(size_t s) {
    return 1 + ((s - 1) >> log2(sizeof(void *)));
}

/**
 * The space to allocate for simple layout instance holders (see below) in multiple of the size of
 * a pointer (e.g. 2 means 16 bytes on 64-bit architectures). The default is the minimum required
 * to hold either a std::unique_ptr or std::shared_ptr (which is almost always
 * sizeof(std::shared_ptr<T>)).
 */
constexpr size_t instance_simple_holder_in_ptrs() {
    static_assert(sizeof(std::shared_ptr<int>) >= sizeof(std::unique_ptr<int>),
                  "pybind assumes std::shared_ptrs are at least as big as std::unique_ptrs");
    return size_in_ptrs(sizeof(std::shared_ptr<int>));
}

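// Worked example of the arithmetic above, assuming a typical 64-bit target
// where sizeof(void *) == 8 and sizeof(std::shared_ptr<int>) == 16:
// log2(8) == 3, so size_in_ptrs(16) == 1 + ((16 - 1) >> 3) == 2, i.e. the
// simple-layout holder slot spans two pointer-sized words.
#if 0 // example only, not compiled as part of this header
static_assert(pybind11::detail::log2(8) == 3, "2^3 == 8");
static_assert(pybind11::detail::size_in_ptrs(16) == 2, "16 bytes -> 2 pointer words");
#endif
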
// Forward declarations
struct type_info;
struct value_and_holder;

struct nonsimple_values_and_holders {
    void **values_and_holders;
    uint8_t *status;
};

/// The 'instance' type which needs to be standard layout (need to be able to use 'offsetof')
struct instance {
    PyObject_HEAD
    /// Storage for pointers and holder; see simple_layout, below, for a description
    union {
        void *simple_value_holder[1 + instance_simple_holder_in_ptrs()];
        nonsimple_values_and_holders nonsimple;
    };
    /// Weak references
    PyObject *weakrefs;
    /// If true, the pointer is owned which means we're free to manage it with a holder.
    bool owned : 1;
    /**
     * An instance has two possible value/holder layouts.
     *
     * Simple layout (when this flag is true), means the `simple_value_holder` is set with a
     * pointer and the holder object governing that pointer, i.e. [val1*][holder]. This layout is
     * applied whenever there is no python-side multiple inheritance of bound C++ types *and* the
     * type's holder will fit in the default space (which is large enough to hold either a
     * std::unique_ptr or std::shared_ptr).
     *
     * Non-simple layout applies when using custom holders that require more space than
     * `shared_ptr` (which is typically the size of two pointers), or when multiple inheritance is
     * used on the python side. Non-simple layout allocates the required amount of memory to have
     * multiple bound C++ classes as parents. Under this layout, `nonsimple.values_and_holders` is
     * set to a pointer to allocated space of the required space to hold a sequence of value
     * pointers and holders followed by `status`, a set of bit flags (1 byte each), i.e.
     * [val1*][holder1][val2*][holder2]...[bb...] where each [block] is rounded up to a multiple
     * of `sizeof(void *)`. `nonsimple.status` is, for convenience, a pointer to the beginning of
     * the [bb...] block (but not independently allocated).
     *
     * Status bits indicate whether the associated holder is constructed (&
     * status_holder_constructed) and whether the value pointer is registered (&
     * status_instance_registered) in `registered_instances`.
     */
    bool simple_layout : 1;
    /// For simple layout, tracks whether the holder has been constructed
    bool simple_holder_constructed : 1;
    /// For simple layout, tracks whether the instance is registered in `registered_instances`
    bool simple_instance_registered : 1;
    /// If true, get_internals().patients has an entry for this object
    bool has_patients : 1;

    /// Initializes all of the above type/values/holders data (but not the instance values
    /// themselves)
    void allocate_layout();

    /// Destroys/deallocates all of the above
    void deallocate_layout();

    /// Returns the value_and_holder wrapper for the given type (or the first, if `find_type`
    /// omitted). Returns a default-constructed (with `.inst = nullptr`) object on failure if
    /// `throw_if_missing` is false.
    value_and_holder get_value_and_holder(const type_info *find_type = nullptr,
                                          bool throw_if_missing = true);

    /// Bit values for the non-simple status flags
    static constexpr uint8_t status_holder_constructed = 1;
    static constexpr uint8_t status_instance_registered = 2;
};

static_assert(std::is_standard_layout<instance>::value,
              "Internal error: `pybind11::detail::instance` is not standard layout!");

/// from __cpp_future__ import (convenient aliases from C++14/17)
#if defined(PYBIND11_CPP14)
using std::conditional_t;
using std::enable_if_t;
using std::remove_cv_t;
using std::remove_reference_t;
#else
template <bool B, typename T = void>
using enable_if_t = typename std::enable_if<B, T>::type;
template <bool B, typename T, typename F>
using conditional_t = typename std::conditional<B, T, F>::type;
template <typename T>
using remove_cv_t = typename std::remove_cv<T>::type;
template <typename T>
using remove_reference_t = typename std::remove_reference<T>::type;
#endif

#if defined(PYBIND11_CPP20)
using std::remove_cvref;
using std::remove_cvref_t;
#else
template <class T>
struct remove_cvref {
    using type = remove_cv_t<remove_reference_t<T>>;
};
template <class T>
using remove_cvref_t = typename remove_cvref<T>::type;
#endif

/// Example usage: is_same_ignoring_cvref<T, PyObject *>::value
template <typename T, typename U>
using is_same_ignoring_cvref = std::is_same<detail::remove_cvref_t<T>, U>;

/// Index sequences
#if defined(PYBIND11_CPP14)
using std::index_sequence;
using std::make_index_sequence;
#else
template <size_t...>
struct index_sequence {};
template <size_t N, size_t... S>
struct make_index_sequence_impl : make_index_sequence_impl<N - 1, N - 1, S...> {};
template <size_t... S>
struct make_index_sequence_impl<0, S...> {
    using type = index_sequence<S...>;
};
template <size_t N>
using make_index_sequence = typename make_index_sequence_impl<N>::type;
#endif

/// Make an index sequence of the indices of true arguments
template <typename ISeq, size_t, bool...>
struct select_indices_impl {
    using type = ISeq;
};
template <size_t... IPrev, size_t I, bool B, bool... Bs>
struct select_indices_impl<index_sequence<IPrev...>, I, B, Bs...>
    : select_indices_impl<conditional_t<B, index_sequence<IPrev..., I>, index_sequence<IPrev...>>,
                          I + 1,
                          Bs...> {};
template <bool... Bs>
using select_indices = typename select_indices_impl<index_sequence<>, 0, Bs...>::type;

/// Backports of std::bool_constant and std::negation to accommodate older compilers
template <bool B>
using bool_constant = std::integral_constant<bool, B>;
template <typename T>
struct negation : bool_constant<!T::value> {};

// PGI/Intel cannot detect operator delete with the "compatible" void_t impl, so
// using the new one (C++14 defect, so generally works on newer compilers, even
// if not in C++17 mode)
#if defined(__PGIC__) || defined(__INTEL_COMPILER)
template <typename...>
using void_t = void;
#else
template <typename...>
struct void_t_impl {
    using type = void;
};
template <typename... Ts>
using void_t = typename void_t_impl<Ts...>::type;
#endif

/// Compile-time all/any/none of that check the boolean value of all template types
#if defined(__cpp_fold_expressions) && !(defined(_MSC_VER) && (_MSC_VER < 1916))
template <class... Ts>
using all_of = bool_constant<(Ts::value && ...)>;
template <class... Ts>
using any_of = bool_constant<(Ts::value || ...)>;
#elif !defined(_MSC_VER)
template <bool...>
struct bools {};
template <class... Ts>
using all_of = std::is_same<bools<Ts::value..., true>, bools<true, Ts::value...>>;
template <class... Ts>
using any_of = negation<all_of<negation<Ts>...>>;
#else
// MSVC has trouble with the above, but supports std::conjunction, which we can use instead (albeit
// at a slight loss of compilation efficiency).
template <class... Ts>
using all_of = std::conjunction<Ts...>;
template <class... Ts>
using any_of = std::disjunction<Ts...>;
#endif
template <class... Ts>
using none_of = negation<any_of<Ts...>>;

template <class T, template <class> class... Predicates>
using satisfies_all_of = all_of<Predicates<T>...>;
template <class T, template <class> class... Predicates>
using satisfies_any_of = any_of<Predicates<T>...>;
template <class T, template <class> class... Predicates>
using satisfies_none_of = none_of<Predicates<T>...>;

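// Illustration of the traits above: in the non-fold-expression branch,
// all_of<Ts...> compares bools<Ts::value..., true> against
// bools<true, Ts::value...>, which are the same type exactly when every
// Ts::value is true.
#if 0 // example only, not compiled as part of this header
using pybind11::detail::all_of;
using pybind11::detail::any_of;
using pybind11::detail::none_of;
static_assert(all_of<std::is_integral<int>, std::is_signed<int>>::value, "both hold");
static_assert(any_of<std::is_integral<float>, std::is_signed<int>>::value, "one holds");
static_assert(none_of<std::is_pointer<int>, std::is_reference<int>>::value, "neither holds");
#endif
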
/// Strip the class from a method type
template <typename T>
struct remove_class {};
template <typename C, typename R, typename... A>
struct remove_class<R (C::*)(A...)> {
    using type = R(A...);
};
template <typename C, typename R, typename... A>
struct remove_class<R (C::*)(A...) const> {
    using type = R(A...);
};
#ifdef __cpp_noexcept_function_type
template <typename C, typename R, typename... A>
struct remove_class<R (C::*)(A...) noexcept> {
    using type = R(A...);
};
template <typename C, typename R, typename... A>
struct remove_class<R (C::*)(A...) const noexcept> {
    using type = R(A...);
};
#endif
/// Helper template to strip away type modifiers
template <typename T>
struct intrinsic_type {
    using type = T;
};
template <typename T>
struct intrinsic_type<const T> {
    using type = typename intrinsic_type<T>::type;
};
template <typename T>
struct intrinsic_type<T *> {
    using type = typename intrinsic_type<T>::type;
};
template <typename T>
struct intrinsic_type<T &> {
    using type = typename intrinsic_type<T>::type;
};
template <typename T>
struct intrinsic_type<T &&> {
    using type = typename intrinsic_type<T>::type;
};
template <typename T, size_t N>
struct intrinsic_type<const T[N]> {
    using type = typename intrinsic_type<T>::type;
};
template <typename T, size_t N>
struct intrinsic_type<T[N]> {
    using type = typename intrinsic_type<T>::type;
};
template <typename T>
using intrinsic_t = typename intrinsic_type<T>::type;

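// Illustration: intrinsic_type recurses through cv-qualifiers, pointers,
// references, and arrays until only the base type remains.
#if 0 // example only, not compiled as part of this header
static_assert(std::is_same<pybind11::detail::intrinsic_t<const int &>, int>::value,
              "strips const and reference");
static_assert(std::is_same<pybind11::detail::intrinsic_t<int *>, int>::value,
              "strips the pointer");
static_assert(std::is_same<pybind11::detail::intrinsic_t<const char[4]>, char>::value,
              "strips the array extent and const");
#endif
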
/// Helper type to replace 'void' in some expressions
|
813 |
+
struct void_type {};
|
814 |
+
|
815 |
+
/// Helper template which holds a list of types
|
816 |
+
template <typename...>
|
817 |
+
struct type_list {};
|
818 |
+
|
819 |
+
/// Compile-time integer sum
|
820 |
+
#ifdef __cpp_fold_expressions
|
821 |
+
template <typename... Ts>
|
822 |
+
constexpr size_t constexpr_sum(Ts... ns) {
|
823 |
+
return (0 + ... + size_t{ns});
|
824 |
+
}
|
825 |
+
#else
|
826 |
+
constexpr size_t constexpr_sum() { return 0; }
|
827 |
+
template <typename T, typename... Ts>
|
828 |
+
constexpr size_t constexpr_sum(T n, Ts... ns) {
|
829 |
+
return size_t{n} + constexpr_sum(ns...);
|
830 |
+
}
|
831 |
+
#endif
|
832 |
+
|
833 |
+
PYBIND11_NAMESPACE_BEGIN(constexpr_impl)
|
834 |
+
/// Implementation details for constexpr functions
|
835 |
+
constexpr int first(int i) { return i; }
|
836 |
+
template <typename T, typename... Ts>
|
837 |
+
constexpr int first(int i, T v, Ts... vs) {
|
838 |
+
return v ? i : first(i + 1, vs...);
|
839 |
+
}
|
840 |
+
|
841 |
+
constexpr int last(int /*i*/, int result) { return result; }
|
842 |
+
template <typename T, typename... Ts>
|
843 |
+
constexpr int last(int i, int result, T v, Ts... vs) {
|
844 |
+
return last(i + 1, v ? i : result, vs...);
|
845 |
+
}
|
846 |
+
PYBIND11_NAMESPACE_END(constexpr_impl)
|
847 |
+
|
848 |
+
/// Return the index of the first type in Ts which satisfies Predicate<T>.
|
849 |
+
/// Returns sizeof...(Ts) if none match.
|
850 |
+
template <template <typename> class Predicate, typename... Ts>
|
851 |
+
constexpr int constexpr_first() {
|
852 |
+
return constexpr_impl::first(0, Predicate<Ts>::value...);
|
853 |
+
}
|
854 |
+
|
855 |
+
/// Return the index of the last type in Ts which satisfies Predicate<T>, or -1 if none match.
|
856 |
+
template <template <typename> class Predicate, typename... Ts>
|
857 |
+
constexpr int constexpr_last() {
|
858 |
+
return constexpr_impl::last(0, -1, Predicate<Ts>::value...);
|
859 |
+
}
|
860 |
+
|
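// Illustrative checks (not from the original header): scan a parameter pack for the
// first/last type matching a unary predicate.
static_assert(constexpr_first<std::is_integral, float, int, double>() == 1,
              "int is the first integral type in the pack");
static_assert(constexpr_last<std::is_integral, int, float, short>() == 2,
              "short is the last integral type in the pack");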
/// Return the Nth element from the parameter pack
template <size_t N, typename T, typename... Ts>
struct pack_element {
    using type = typename pack_element<N - 1, Ts...>::type;
};
template <typename T, typename... Ts>
struct pack_element<0, T, Ts...> {
    using type = T;
};

/// Return the one and only type which matches the predicate, or Default if none match.
/// If more than one type matches the predicate, fail at compile-time.
template <template <typename> class Predicate, typename Default, typename... Ts>
struct exactly_one {
    static constexpr auto found = constexpr_sum(Predicate<Ts>::value...);
    static_assert(found <= 1, "Found more than one type matching the predicate");

    static constexpr auto index = found ? constexpr_first<Predicate, Ts...>() : 0;
    using type = conditional_t<found, typename pack_element<index, Ts...>::type, Default>;
};
template <template <typename> class P, typename Default>
struct exactly_one<P, Default> {
    using type = Default;
};

template <template <typename> class Predicate, typename Default, typename... Ts>
using exactly_one_t = typename exactly_one<Predicate, Default, Ts...>::type;
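// Illustrative checks (not from the original header): pick the unique matching type,
// or fall back to the default when nothing matches.
static_assert(std::is_same<exactly_one_t<std::is_integral, void, float, int, double>, int>::value,
              "exactly one integral type present");
static_assert(std::is_same<exactly_one_t<std::is_integral, void, float, double>, void>::value,
              "no match falls back to the Default type");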
/// Defer the evaluation of type T until types Us are instantiated
template <typename T, typename... /*Us*/>
struct deferred_type {
    using type = T;
};
template <typename T, typename... Us>
using deferred_t = typename deferred_type<T, Us...>::type;

/// Like is_base_of, but requires a strict base (i.e. `is_strict_base_of<T, T>::value == false`,
/// unlike `std::is_base_of`)
template <typename Base, typename Derived>
using is_strict_base_of
    = bool_constant<std::is_base_of<Base, Derived>::value && !std::is_same<Base, Derived>::value>;

/// Like is_base_of, but also requires that the base type is accessible (i.e. that a Derived
/// pointer can be converted to a Base pointer) For unions, `is_base_of<T, T>::value` is False, so
/// we need to check `is_same` as well.
template <typename Base, typename Derived>
using is_accessible_base_of
    = bool_constant<(std::is_same<Base, Derived>::value || std::is_base_of<Base, Derived>::value)
                    && std::is_convertible<Derived *, Base *>::value>;

template <template <typename...> class Base>
struct is_template_base_of_impl {
    template <typename... Us>
    static std::true_type check(Base<Us...> *);
    static std::false_type check(...);
};

/// Check if a template is the base of a type. For example:
/// `is_template_base_of<Base, T>` is true if `struct T : Base<U> {}` where U can be anything
template <template <typename...> class Base, typename T>
// Sadly, all MSVC versions incl. 2022 need the workaround, even in C++20 mode.
// See also: https://github.com/pybind/pybind11/pull/3741
#if !defined(_MSC_VER)
using is_template_base_of
    = decltype(is_template_base_of_impl<Base>::check((intrinsic_t<T> *) nullptr));
#else
struct is_template_base_of
    : decltype(is_template_base_of_impl<Base>::check((intrinsic_t<T> *) nullptr)) {
};
#endif

/// Check if T is an instantiation of the template `Class`. For example:
/// `is_instantiation<shared_ptr, T>` is true if `T == shared_ptr<U>` where U can be anything.
template <template <typename...> class Class, typename T>
struct is_instantiation : std::false_type {};
template <template <typename...> class Class, typename... Us>
struct is_instantiation<Class, Class<Us...>> : std::true_type {};

/// Check if T is std::shared_ptr<U> where U can be anything
template <typename T>
using is_shared_ptr = is_instantiation<std::shared_ptr, T>;
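// Illustrative checks (not from the original header; assumes <memory> and <vector> are
// included, as elsewhere in this file): detecting template instantiations.
static_assert(is_shared_ptr<std::shared_ptr<int>>::value, "matches any shared_ptr<U>");
static_assert(!is_shared_ptr<std::unique_ptr<int>>::value, "other smart pointers do not match");
static_assert(is_instantiation<std::vector, std::vector<int>>::value, "general form");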
/// Check if T looks like an input iterator
template <typename T, typename = void>
struct is_input_iterator : std::false_type {};
template <typename T>
struct is_input_iterator<T,
                         void_t<decltype(*std::declval<T &>()), decltype(++std::declval<T &>())>>
    : std::true_type {};

template <typename T>
using is_function_pointer
    = bool_constant<std::is_pointer<T>::value
                    && std::is_function<typename std::remove_pointer<T>::type>::value>;

template <typename F>
struct strip_function_object {
    // If you are encountering an
    // 'error: name followed by "::" must be a class or namespace name'
    // with the Intel compiler and a noexcept function here,
    // try to use noexcept(true) instead of plain noexcept.
    using type = typename remove_class<decltype(&F::operator())>::type;
};

// Extracts the function signature from a function, function pointer or lambda.
template <typename Function, typename F = remove_reference_t<Function>>
using function_signature_t = conditional_t<
    std::is_function<F>::value,
    F,
    typename conditional_t<std::is_pointer<F>::value || std::is_member_pointer<F>::value,
                           std::remove_pointer<F>,
                           strip_function_object<F>>::type>;

/// Returns true if the type looks like a lambda: that is, isn't a function, pointer or member
/// pointer. Note that this can catch all sorts of other things, too; this is intended to be used
/// in a place where passing a lambda makes sense.
template <typename T>
using is_lambda = satisfies_none_of<remove_reference_t<T>,
                                    std::is_function,
                                    std::is_pointer,
                                    std::is_member_pointer>;
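// Illustrative checks (not from the original header): the same R(Args...) signature is
// recovered whether the callable is a plain function, a function pointer, or a functor
// with a single operator() (the lambda case goes through strip_function_object).
static_assert(std::is_same<function_signature_t<int (*)(double)>, int(double)>::value,
              "signature extracted from a function pointer");
static_assert(is_function_pointer<int (*)(double)>::value && !is_lambda<int (*)(double)>::value,
              "a function pointer is not classified as a lambda");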
// [workaround(intel)] Internal error on fold expression
/// Apply a function over each element of a parameter pack
#if defined(__cpp_fold_expressions) && !defined(__INTEL_COMPILER)
// Intel compiler produces an internal error on this fold expression (tested with ICC 19.0.2)
#    define PYBIND11_EXPAND_SIDE_EFFECTS(PATTERN) (((PATTERN), void()), ...)
#else
using expand_side_effects = bool[];
#    define PYBIND11_EXPAND_SIDE_EFFECTS(PATTERN)                                                \
        (void) pybind11::detail::expand_side_effects { ((PATTERN), void(), false)..., false }
#endif
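// A minimal usage sketch (illustrative; `append_all` is a hypothetical helper, not part
// of this header): evaluate an expression once per element of a parameter pack.
// template <typename... Args>
// void append_all(std::vector<int> &out, Args... args) {
//     PYBIND11_EXPAND_SIDE_EFFECTS(out.push_back(args));
// }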
PYBIND11_NAMESPACE_END(detail)

/// C++ bindings of builtin Python exceptions
class PYBIND11_EXPORT_EXCEPTION builtin_exception : public std::runtime_error {
public:
    using std::runtime_error::runtime_error;
    /// Set the error using the Python C API
    virtual void set_error() const = 0;
};

#define PYBIND11_RUNTIME_EXCEPTION(name, type)                                                    \
    class PYBIND11_EXPORT_EXCEPTION name : public builtin_exception {                            \
    public:                                                                                       \
        using builtin_exception::builtin_exception;                                              \
        name() : name("") {}                                                                     \
        void set_error() const override { PyErr_SetString(type, what()); }                       \
    };

PYBIND11_RUNTIME_EXCEPTION(stop_iteration, PyExc_StopIteration)
PYBIND11_RUNTIME_EXCEPTION(index_error, PyExc_IndexError)
PYBIND11_RUNTIME_EXCEPTION(key_error, PyExc_KeyError)
PYBIND11_RUNTIME_EXCEPTION(value_error, PyExc_ValueError)
PYBIND11_RUNTIME_EXCEPTION(type_error, PyExc_TypeError)
PYBIND11_RUNTIME_EXCEPTION(buffer_error, PyExc_BufferError)
PYBIND11_RUNTIME_EXCEPTION(import_error, PyExc_ImportError)
PYBIND11_RUNTIME_EXCEPTION(attribute_error, PyExc_AttributeError)
PYBIND11_RUNTIME_EXCEPTION(cast_error, PyExc_RuntimeError) /// Thrown when pybind11::cast or
                                                           /// handle::call fail due to a type
                                                           /// casting error
PYBIND11_RUNTIME_EXCEPTION(reference_cast_error, PyExc_RuntimeError) /// Used internally
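// A minimal usage sketch (illustrative; `at` and `v` are hypothetical): throwing one of
// these exceptions from bound C++ code raises the corresponding Python exception.
// int at(const std::vector<int> &v, size_t i) {
//     if (i >= v.size())
//         throw pybind11::index_error("index out of range"); // -> Python IndexError
//     return v[i];
// }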
[[noreturn]] PYBIND11_NOINLINE void pybind11_fail(const char *reason) {
    assert(!PyErr_Occurred());
    throw std::runtime_error(reason);
}
[[noreturn]] PYBIND11_NOINLINE void pybind11_fail(const std::string &reason) {
    assert(!PyErr_Occurred());
    throw std::runtime_error(reason);
}

template <typename T, typename SFINAE = void>
struct format_descriptor {};

template <typename T>
struct format_descriptor<
    T,
    detail::enable_if_t<detail::is_same_ignoring_cvref<T, PyObject *>::value>> {
    static constexpr const char c = 'O';
    static constexpr const char value[2] = {c, '\0'};
    static std::string format() { return std::string(1, c); }
};

PYBIND11_NAMESPACE_BEGIN(detail)
// Returns the index of the given type in the type char array below, and in the list in numpy.h
// The order here is: bool; 8 ints ((signed,unsigned)x(8,16,32,64)bits); float,double,long double;
// complex float,double,long double. Note that the long double types only participate when long
// double is actually longer than double (it isn't under MSVC).
// NB: not only the string below but also complex.h and numpy.h rely on this order.
template <typename T, typename SFINAE = void>
struct is_fmt_numeric {
    static constexpr bool value = false;
};
template <typename T>
struct is_fmt_numeric<T, enable_if_t<std::is_arithmetic<T>::value>> {
    static constexpr bool value = true;
    static constexpr int index
        = std::is_same<T, bool>::value
              ? 0
              : 1
                    + (std::is_integral<T>::value
                           ? detail::log2(sizeof(T)) * 2 + std::is_unsigned<T>::value
                           : 8
                                 + (std::is_same<T, double>::value        ? 1
                                    : std::is_same<T, long double>::value ? 2
                                                                          : 0));
};
PYBIND11_NAMESPACE_END(detail)

template <typename T>
struct format_descriptor<T, detail::enable_if_t<std::is_arithmetic<T>::value>> {
    static constexpr const char c = "?bBhHiIqQfdg"[detail::is_fmt_numeric<T>::index];
    static constexpr const char value[2] = {c, '\0'};
    static std::string format() { return std::string(1, c); }
};
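// Illustrative checks (not from the original header): the buffer-protocol format
// characters produced by the index table above; e.g. bool is index 0 ('?') and double
// is index 10 ('d') in "?bBhHiIqQfdg".
static_assert(format_descriptor<bool>::c == '?', "bool maps to '?'");
static_assert(format_descriptor<double>::c == 'd', "double maps to 'd'");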
#if !defined(PYBIND11_CPP17)

template <typename T>
constexpr const char
    format_descriptor<T, detail::enable_if_t<std::is_arithmetic<T>::value>>::value[2];

#endif

/// RAII wrapper that temporarily clears any Python error state
struct error_scope {
    PyObject *type, *value, *trace;
    error_scope() { PyErr_Fetch(&type, &value, &trace); }
    error_scope(const error_scope &) = delete;
    error_scope &operator=(const error_scope &) = delete;
    ~error_scope() { PyErr_Restore(type, value, trace); }
};

/// Dummy destructor wrapper that can be used to expose classes with a private destructor
struct nodelete {
    template <typename T>
    void operator()(T *) {}
};
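// A minimal usage sketch (illustrative; `Singleton` is hypothetical): pair py::nodelete
// with a unique_ptr holder so Python never deletes an object it does not own.
// py::class_<Singleton, std::unique_ptr<Singleton, py::nodelete>>(m, "Singleton")
//     .def_static("instance", &Singleton::instance, py::return_value_policy::reference);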
PYBIND11_NAMESPACE_BEGIN(detail)
template <typename... Args>
struct overload_cast_impl {
    template <typename Return>
    constexpr auto operator()(Return (*pf)(Args...)) const noexcept -> decltype(pf) {
        return pf;
    }

    template <typename Return, typename Class>
    constexpr auto operator()(Return (Class::*pmf)(Args...), std::false_type = {}) const noexcept
        -> decltype(pmf) {
        return pmf;
    }

    template <typename Return, typename Class>
    constexpr auto operator()(Return (Class::*pmf)(Args...) const, std::true_type) const noexcept
        -> decltype(pmf) {
        return pmf;
    }
};
PYBIND11_NAMESPACE_END(detail)

// overload_cast requires variable templates: C++14
#if defined(PYBIND11_CPP14)
#    define PYBIND11_OVERLOAD_CAST 1
/// Syntax sugar for resolving overloaded function pointers:
/// - regular: static_cast<Return (Class::*)(Arg0, Arg1, Arg2)>(&Class::func)
/// - sweet:   overload_cast<Arg0, Arg1, Arg2>(&Class::func)
template <typename... Args>
static constexpr detail::overload_cast_impl<Args...> overload_cast{};
#endif

/// Const member function selector for overload_cast
/// - regular: static_cast<Return (Class::*)(Arg) const>(&Class::func)
/// - sweet:   overload_cast<Arg>(&Class::func, const_)
static constexpr auto const_ = std::true_type{};

#if !defined(PYBIND11_CPP14) // no overload_cast: providing something that static_assert-fails:
template <typename... Args>
struct overload_cast {
    static_assert(detail::deferred_t<std::false_type, Args...>::value,
                  "pybind11::overload_cast<...> requires compiling in C++14 mode");
};
#endif // overload_cast
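// A minimal usage sketch (illustrative; `Pet` is hypothetical): disambiguating two
// overloads of the same member function when binding, plus a const overload via const_.
// struct Pet {
//     void set(int age);
//     void set(const std::string &name);
//     std::string get() const;
// };
// py::class_<Pet>(m, "Pet")
//     .def("set", py::overload_cast<int>(&Pet::set))
//     .def("set", py::overload_cast<const std::string &>(&Pet::set))
//     .def("get", py::overload_cast<>(&Pet::get, py::const_));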
PYBIND11_NAMESPACE_BEGIN(detail)

// Adaptor for converting arbitrary container arguments into a vector; implicitly convertible from
// any standard container (or C-style array) supporting std::begin/std::end, any singleton
// arithmetic type (if T is arithmetic), or explicitly constructible from an iterator pair.
template <typename T>
class any_container {
    std::vector<T> v;

public:
    any_container() = default;

    // Can construct from a pair of iterators
    template <typename It, typename = enable_if_t<is_input_iterator<It>::value>>
    any_container(It first, It last) : v(first, last) {}

    // Implicit conversion constructor from any arbitrary container type
    // with values convertible to T
    template <typename Container,
              typename = enable_if_t<
                  std::is_convertible<decltype(*std::begin(std::declval<const Container &>())),
                                      T>::value>>
    // NOLINTNEXTLINE(google-explicit-constructor)
    any_container(const Container &c) : any_container(std::begin(c), std::end(c)) {}

    // initializer_list's aren't deducible, so don't get matched by the above template;
    // we need this to explicitly allow implicit conversion from one:
    template <typename TIn, typename = enable_if_t<std::is_convertible<TIn, T>::value>>
    any_container(const std::initializer_list<TIn> &c) : any_container(c.begin(), c.end()) {}

    // Avoid copying if given an rvalue vector of the correct type.
    // NOLINTNEXTLINE(google-explicit-constructor)
    any_container(std::vector<T> &&v) : v(std::move(v)) {}

    // Moves the vector out of an rvalue any_container
    // NOLINTNEXTLINE(google-explicit-constructor)
    operator std::vector<T> &&() && { return std::move(v); }

    // Dereferencing obtains a reference to the underlying vector
    std::vector<T> &operator*() { return v; }
    const std::vector<T> &operator*() const { return v; }

    // -> lets you call methods on the underlying vector
    std::vector<T> *operator->() { return &v; }
    const std::vector<T> *operator->() const { return &v; }
};
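// A minimal usage sketch (illustrative; `resize` is hypothetical): one parameter of type
// any_container<ssize_t> accepts braced lists, std::vector, std::array, and so on.
// void resize(any_container<ssize_t> shape) {
//     size_t ndim = shape->size();                  // operator-> reaches the vector
//     std::vector<ssize_t> dims = std::move(shape); // rvalue conversion, no copy
// }
// resize({2, 3, 4});                                // initializer_list converts implicitly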
// Forward-declaration; see detail/class.h
std::string get_fully_qualified_tp_name(PyTypeObject *);

template <typename T>
inline static std::shared_ptr<T>
try_get_shared_from_this(std::enable_shared_from_this<T> *holder_value_ptr) {
// Pre C++17, this code path exploits undefined behavior, but is known to work on many platforms.
// Use at your own risk!
// See also https://en.cppreference.com/w/cpp/memory/enable_shared_from_this, and in particular
// the `std::shared_ptr<Good> gp1 = not_so_good.getptr();` and `try`-`catch` parts of the example.
#if defined(__cpp_lib_enable_shared_from_this) && (!defined(_MSC_VER) || _MSC_VER >= 1912)
    return holder_value_ptr->weak_from_this().lock();
#else
    try {
        return holder_value_ptr->shared_from_this();
    } catch (const std::bad_weak_ptr &) {
        return nullptr;
    }
#endif
}
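// A minimal usage sketch (illustrative; `Node` is hypothetical): a non-null shared_ptr
// comes back only when some shared_ptr already owns the object.
// struct Node : std::enable_shared_from_this<Node> {};
// auto owned = std::make_shared<Node>();
// assert(try_get_shared_from_this(owned.get()) != nullptr); // already shared-owned
// Node stack_node;
// assert(try_get_shared_from_this(&stack_node) == nullptr); // no owner -> nullptr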
// For silencing "unused" compiler warnings in special situations.
template <typename... Args>
#if defined(_MSC_VER) && _MSC_VER < 1920 // MSVC 2017
constexpr
#endif
    inline void
    silence_unused_warnings(Args &&...) {
}

// MSVC warning C4100: Unreferenced formal parameter
#if defined(_MSC_VER) && _MSC_VER <= 1916
#    define PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(...)                                        \
        detail::silence_unused_warnings(__VA_ARGS__)
#else
#    define PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(...)
#endif

// GCC -Wunused-but-set-parameter All GCC versions (as of July 2021).
#if defined(__GNUG__) && !defined(__clang__) && !defined(__INTEL_COMPILER)
#    define PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(...)                      \
        detail::silence_unused_warnings(__VA_ARGS__)
#else
#    define PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(...)
#endif

#if defined(__clang__)                                                                           \
    && (defined(__apple_build_version__) /* AppleClang 13.0.0.13000029 was the only data point  \
                                            available. */                                       \
        || (__clang_major__ >= 7                                                                 \
            && __clang_major__ <= 12) /* Clang 3, 5, 13, 14, 15 do not generate the warning. */  \
    )
#    define PYBIND11_DETECTED_CLANG_WITH_MISLEADING_CALL_STD_MOVE_EXPLICITLY_WARNING
// Example:
// tests/test_kwargs_and_defaults.cpp:46:68: error: local variable 'args' will be copied despite
// being returned by name [-Werror,-Wreturn-std-move]
//     m.def("args_function", [](py::args args) -> py::tuple { return args; });
//                                                                    ^~~~
// test_kwargs_and_defaults.cpp:46:68: note: call 'std::move' explicitly to avoid copying
//     m.def("args_function", [](py::args args) -> py::tuple { return args; });
//                                                                    ^~~~
//                                                                    std::move(args)
#endif

// Pybind offers detailed error messages by default for all builds that are debug (through the
// negation of NDEBUG). This can also be manually enabled by users, for any builds, through
// defining PYBIND11_DETAILED_ERROR_MESSAGES. This information is primarily useful for those
// who are writing (as opposed to merely using) libraries that use pybind11.
#if !defined(PYBIND11_DETAILED_ERROR_MESSAGES) && !defined(NDEBUG)
#    define PYBIND11_DETAILED_ERROR_MESSAGES
#endif

PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
venv/lib/python3.10/site-packages/torch/include/pybind11/detail/descr.h
ADDED
@@ -0,0 +1,171 @@
/*
    pybind11/detail/descr.h: Helper type for concatenating type signatures at compile time

    Copyright (c) 2016 Wenzel Jakob <[email protected]>

    All rights reserved. Use of this source code is governed by a
    BSD-style license that can be found in the LICENSE file.
*/

#pragma once

#include "common.h"

PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)

#if !defined(_MSC_VER)
#    define PYBIND11_DESCR_CONSTEXPR static constexpr
#else
#    define PYBIND11_DESCR_CONSTEXPR const
#endif

/* Concatenate type signatures at compile time */
template <size_t N, typename... Ts>
struct descr {
    char text[N + 1]{'\0'};

    constexpr descr() = default;
    // NOLINTNEXTLINE(google-explicit-constructor)
    constexpr descr(char const (&s)[N + 1]) : descr(s, make_index_sequence<N>()) {}

    template <size_t... Is>
    constexpr descr(char const (&s)[N + 1], index_sequence<Is...>) : text{s[Is]..., '\0'} {}

    template <typename... Chars>
    // NOLINTNEXTLINE(google-explicit-constructor)
    constexpr descr(char c, Chars... cs) : text{c, static_cast<char>(cs)..., '\0'} {}

    static constexpr std::array<const std::type_info *, sizeof...(Ts) + 1> types() {
        return {{&typeid(Ts)..., nullptr}};
    }
};

template <size_t N1, size_t N2, typename... Ts1, typename... Ts2, size_t... Is1, size_t... Is2>
constexpr descr<N1 + N2, Ts1..., Ts2...> plus_impl(const descr<N1, Ts1...> &a,
                                                   const descr<N2, Ts2...> &b,
                                                   index_sequence<Is1...>,
                                                   index_sequence<Is2...>) {
    PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(b);
    return {a.text[Is1]..., b.text[Is2]...};
}

template <size_t N1, size_t N2, typename... Ts1, typename... Ts2>
constexpr descr<N1 + N2, Ts1..., Ts2...> operator+(const descr<N1, Ts1...> &a,
                                                   const descr<N2, Ts2...> &b) {
    return plus_impl(a, b, make_index_sequence<N1>(), make_index_sequence<N2>());
}

template <size_t N>
constexpr descr<N - 1> const_name(char const (&text)[N]) {
    return descr<N - 1>(text);
}
constexpr descr<0> const_name(char const (&)[1]) { return {}; }

template <size_t Rem, size_t... Digits>
struct int_to_str : int_to_str<Rem / 10, Rem % 10, Digits...> {};
template <size_t... Digits>
struct int_to_str<0, Digits...> {
    // WARNING: This only works with C++17 or higher.
    static constexpr auto digits = descr<sizeof...(Digits)>(('0' + Digits)...);
};

// Ternary description (like std::conditional)
template <bool B, size_t N1, size_t N2>
constexpr enable_if_t<B, descr<N1 - 1>> const_name(char const (&text1)[N1], char const (&)[N2]) {
    return const_name(text1);
}
template <bool B, size_t N1, size_t N2>
constexpr enable_if_t<!B, descr<N2 - 1>> const_name(char const (&)[N1], char const (&text2)[N2]) {
    return const_name(text2);
}

template <bool B, typename T1, typename T2>
constexpr enable_if_t<B, T1> const_name(const T1 &d, const T2 &) {
    return d;
}
template <bool B, typename T1, typename T2>
constexpr enable_if_t<!B, T2> const_name(const T1 &, const T2 &d) {
    return d;
}

template <size_t Size>
auto constexpr const_name() -> remove_cv_t<decltype(int_to_str<Size / 10, Size % 10>::digits)> {
    return int_to_str<Size / 10, Size % 10>::digits;
}

template <typename Type>
constexpr descr<1, Type> const_name() {
    return {'%'};
}

// If "_" is defined as a macro, py::detail::_ cannot be provided.
// It is therefore best to use py::detail::const_name universally.
// This block is for backward compatibility only.
// (The const_name code is repeated to avoid introducing a "_" #define ourselves.)
#ifndef _
#    define PYBIND11_DETAIL_UNDERSCORE_BACKWARD_COMPATIBILITY
template <size_t N>
constexpr descr<N - 1> _(char const (&text)[N]) {
    return const_name<N>(text);
}
template <bool B, size_t N1, size_t N2>
constexpr enable_if_t<B, descr<N1 - 1>> _(char const (&text1)[N1], char const (&text2)[N2]) {
    return const_name<B, N1, N2>(text1, text2);
}
template <bool B, size_t N1, size_t N2>
constexpr enable_if_t<!B, descr<N2 - 1>> _(char const (&text1)[N1], char const (&text2)[N2]) {
    return const_name<B, N1, N2>(text1, text2);
}
template <bool B, typename T1, typename T2>
constexpr enable_if_t<B, T1> _(const T1 &d1, const T2 &d2) {
    return const_name<B, T1, T2>(d1, d2);
}
template <bool B, typename T1, typename T2>
constexpr enable_if_t<!B, T2> _(const T1 &d1, const T2 &d2) {
    return const_name<B, T1, T2>(d1, d2);
}

template <size_t Size>
auto constexpr _() -> remove_cv_t<decltype(int_to_str<Size / 10, Size % 10>::digits)> {
    return const_name<Size>();
}
template <typename Type>
constexpr descr<1, Type> _() {
    return const_name<Type>();
}
#endif // #ifndef _

constexpr descr<0> concat() { return {}; }

template <size_t N, typename... Ts>
constexpr descr<N, Ts...> concat(const descr<N, Ts...> &descr) {
    return descr;
}

#ifdef __cpp_fold_expressions
template <size_t N1, size_t N2, typename... Ts1, typename... Ts2>
constexpr descr<N1 + N2 + 2, Ts1..., Ts2...> operator,(const descr<N1, Ts1...> &a,
                                                       const descr<N2, Ts2...> &b) {
    return a + const_name(", ") + b;
}

template <size_t N, typename... Ts, typename... Args>
constexpr auto concat(const descr<N, Ts...> &d, const Args &...args) {
    return (d, ..., args);
}
#else
template <size_t N, typename... Ts, typename... Args>
constexpr auto concat(const descr<N, Ts...> &d, const Args &...args)
    -> decltype(std::declval<descr<N + 2, Ts...>>() + concat(args...)) {
    return d + const_name(", ") + concat(args...);
}
#endif

template <size_t N, typename... Ts>
constexpr descr<N + 2, Ts...> type_descr(const descr<N, Ts...> &descr) {
    return const_name("{") + descr + const_name("}");
}
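// A minimal usage sketch (illustrative, not part of the original header): descr values
// concatenate at compile time via operator+ and concat(), inserting ", " separators.
// PYBIND11_DESCR_CONSTEXPR auto sig
//     = const_name("Tuple[") + concat(const_name("int"), const_name("float")) + const_name("]");
// // sig.text == "Tuple[int, float]"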
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
venv/lib/python3.10/site-packages/torch/include/pybind11/detail/init.h
ADDED
@@ -0,0 +1,434 @@
/*
    pybind11/detail/init.h: init factory function implementation and support code.

    Copyright (c) 2017 Jason Rhinelander <[email protected]>

    All rights reserved. Use of this source code is governed by a
    BSD-style license that can be found in the LICENSE file.
*/

#pragma once

#include "class.h"

PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)

PYBIND11_WARNING_DISABLE_MSVC(4127)

PYBIND11_NAMESPACE_BEGIN(detail)

template <>
class type_caster<value_and_holder> {
public:
    bool load(handle h, bool) {
        value = reinterpret_cast<value_and_holder *>(h.ptr());
        return true;
    }

    template <typename>
    using cast_op_type = value_and_holder &;
    explicit operator value_and_holder &() { return *value; }
    static constexpr auto name = const_name<value_and_holder>();

private:
    value_and_holder *value = nullptr;
};

PYBIND11_NAMESPACE_BEGIN(initimpl)

inline void no_nullptr(void *ptr) {
    if (!ptr) {
        throw type_error("pybind11::init(): factory function returned nullptr");
    }
}

// Implementing functions for all forms of py::init<...> and py::init(...)
template <typename Class>
using Cpp = typename Class::type;
template <typename Class>
using Alias = typename Class::type_alias;
template <typename Class>
using Holder = typename Class::holder_type;

template <typename Class>
using is_alias_constructible = std::is_constructible<Alias<Class>, Cpp<Class> &&>;

// Takes a Cpp pointer and returns true if it actually is a polymorphic Alias instance.
template <typename Class, enable_if_t<Class::has_alias, int> = 0>
bool is_alias(Cpp<Class> *ptr) {
    return dynamic_cast<Alias<Class> *>(ptr) != nullptr;
}
// Failing fallback version of the above for a no-alias class (always returns false)
template <typename /*Class*/>
constexpr bool is_alias(void *) {
    return false;
}

// Constructs and returns a new object; if the given arguments don't map to a constructor, we fall
// back to brace aggregate initialization so that aggregate initialization can be used with
// py::init, e.g. `py::init<int, int>` to initialize a `struct T { int a; int b; }`. For
// non-aggregate types, we need to use an ordinary T(...) constructor (invoking as `T{...}` usually
// works, but will not do the expected thing when `T` has an `initializer_list<T>` constructor).
template <typename Class,
          typename... Args,
          detail::enable_if_t<std::is_constructible<Class, Args...>::value, int> = 0>
inline Class *construct_or_initialize(Args &&...args) {
    return new Class(std::forward<Args>(args)...);
}
template <typename Class,
          typename... Args,
          detail::enable_if_t<!std::is_constructible<Class, Args...>::value, int> = 0>
inline Class *construct_or_initialize(Args &&...args) {
    return new Class{std::forward<Args>(args)...};
}
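// A minimal usage sketch (illustrative; `Aggregate` is hypothetical): the SFINAE pair
// above picks parentheses when a matching constructor exists, braces otherwise.
// struct Aggregate { int a; int b; };                        // no user-declared constructor
// auto *agg = construct_or_initialize<Aggregate>(1, 2);      // -> new Aggregate{1, 2}
// auto *str = construct_or_initialize<std::string>(3, 'x');  // -> new std::string(3, 'x')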
// Attempts to construct an alias using an `Alias(Cpp &&)` constructor. This allows types with
// an alias to provide only a single Cpp factory function as long as the Alias can be
// constructed from an rvalue reference of the base Cpp type. This means that Alias classes
// can, when appropriate, simply define an `Alias(Cpp &&)` constructor rather than needing to
// inherit all the base class constructors.
template <typename Class>
void construct_alias_from_cpp(std::true_type /*is_alias_constructible*/,
                              value_and_holder &v_h,
                              Cpp<Class> &&base) {
    v_h.value_ptr() = new Alias<Class>(std::move(base));
}
template <typename Class>
[[noreturn]] void construct_alias_from_cpp(std::false_type /*!is_alias_constructible*/,
                                           value_and_holder &,
                                           Cpp<Class> &&) {
    throw type_error("pybind11::init(): unable to convert returned instance to required "
                     "alias class: no `Alias<Class>(Class &&)` constructor available");
}

// Error-generating fallback for factories that don't match one of the below construction
// mechanisms.
template <typename Class>
void construct(...) {
    static_assert(!std::is_same<Class, Class>::value /* always false */,
                  "pybind11::init(): init function must return a compatible pointer, "
                  "holder, or value");
}

// Pointer return v1: the factory function returns a class pointer for a registered class.
// If we don't need an alias (because this class doesn't have one, or because the final type is
// inherited on the Python side) we can simply take over ownership. Otherwise we need to try to
// construct an Alias from the returned base instance.
template <typename Class>
void construct(value_and_holder &v_h, Cpp<Class> *ptr, bool need_alias) {
    PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(need_alias);
    no_nullptr(ptr);
    if (Class::has_alias && need_alias && !is_alias<Class>(ptr)) {
        // We're going to try to construct an alias by moving the cpp type. Whether or not
        // that succeeds, we still need to destroy the original cpp pointer (either the
        // moved away leftover, if the alias construction works, or the value itself if we
        // throw an error), but we can't just call `delete ptr`: it might have a special
        // deleter, or might be shared_from_this. So we construct a holder around it as if
        // it was a normal instance, then steal the holder away into a local variable; thus
        // the holder and destruction happens when we leave the C++ scope, and the holder
        // class gets to handle the destruction however it likes.
        v_h.value_ptr() = ptr;
        v_h.set_instance_registered(true); // To prevent init_instance from registering it
        v_h.type->init_instance(v_h.inst, nullptr); // Set up the holder
        Holder<Class> temp_holder(std::move(v_h.holder<Holder<Class>>())); // Steal the holder
        v_h.type->dealloc(v_h); // Destroys the moved-out holder remains, resets value ptr to null
        v_h.set_instance_registered(false);

        construct_alias_from_cpp<Class>(is_alias_constructible<Class>{}, v_h, std::move(*ptr));
    } else {
        // Otherwise the type isn't inherited, so we don't need an Alias
        v_h.value_ptr() = ptr;
    }
}

// Pointer return v2: a factory that always returns an alias instance ptr. We simply take over
// ownership of the pointer.
template <typename Class, enable_if_t<Class::has_alias, int> = 0>
void construct(value_and_holder &v_h, Alias<Class> *alias_ptr, bool) {
    no_nullptr(alias_ptr);
    v_h.value_ptr() = static_cast<Cpp<Class> *>(alias_ptr);
}

// Holder return: copy its pointer, and move or copy the returned holder into the new instance's
// holder. This also handles types like std::shared_ptr<T> and std::unique_ptr<T> where T is a
// derived type (through those holders' implicit conversion from derived class holder
// constructors).
template <typename Class>
void construct(value_and_holder &v_h, Holder<Class> holder, bool need_alias) {
    PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(need_alias);
    auto *ptr = holder_helper<Holder<Class>>::get(holder);
    no_nullptr(ptr);
    // If we need an alias, check that the held pointer is actually an alias instance
    if (Class::has_alias && need_alias && !is_alias<Class>(ptr)) {
        throw type_error("pybind11::init(): construction failed: returned holder-wrapped instance "
                         "is not an alias instance");
    }

    v_h.value_ptr() = ptr;
    v_h.type->init_instance(v_h.inst, &holder);
}

// return-by-value version 1: returning a cpp class by value. If the class has an alias and an
// alias is required the alias must have an `Alias(Cpp &&)` constructor so that we can construct
// the alias from the base when needed (i.e. because of Python-side inheritance). When we don't
// need it, we simply move-construct the cpp value into a new instance.
template <typename Class>
void construct(value_and_holder &v_h, Cpp<Class> &&result, bool need_alias) {
    PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(need_alias);
    static_assert(is_move_constructible<Cpp<Class>>::value,
                  "pybind11::init() return-by-value factory function requires a movable class");
    if (Class::has_alias && need_alias) {
        construct_alias_from_cpp<Class>(is_alias_constructible<Class>{}, v_h, std::move(result));
    } else {
        v_h.value_ptr() = new Cpp<Class>(std::move(result));
    }
}

// return-by-value version 2: returning a value of the alias type itself. We move-construct an
// Alias instance (even if no Python-side inheritance is involved). This is intended for
// cases where Alias initialization is always desired.
template <typename Class>
void construct(value_and_holder &v_h, Alias<Class> &&result, bool) {
    static_assert(
        is_move_constructible<Alias<Class>>::value,
        "pybind11::init() return-by-alias-value factory function requires a movable alias class");
    v_h.value_ptr() = new Alias<Class>(std::move(result));
}

// Implementing class for py::init<...>()
template <typename... Args>
struct constructor {
    template <typename Class, typename... Extra, enable_if_t<!Class::has_alias, int> = 0>
    static void execute(Class &cl, const Extra &...extra) {
        cl.def(
            "__init__",
            [](value_and_holder &v_h, Args... args) {
                v_h.value_ptr() = construct_or_initialize<Cpp<Class>>(std::forward<Args>(args)...);
            },
            is_new_style_constructor(),
            extra...);
    }

    template <
        typename Class,
        typename... Extra,
        enable_if_t<Class::has_alias && std::is_constructible<Cpp<Class>, Args...>::value, int>
        = 0>
    static void execute(Class &cl, const Extra &...extra) {
        cl.def(
            "__init__",
            [](value_and_holder &v_h, Args... args) {
                if (Py_TYPE(v_h.inst) == v_h.type->type) {
                    v_h.value_ptr()
                        = construct_or_initialize<Cpp<Class>>(std::forward<Args>(args)...);
                } else {
                    v_h.value_ptr()
                        = construct_or_initialize<Alias<Class>>(std::forward<Args>(args)...);
                }
            },
            is_new_style_constructor(),
            extra...);
    }

    template <
        typename Class,
        typename... Extra,
        enable_if_t<Class::has_alias && !std::is_constructible<Cpp<Class>, Args...>::value, int>
        = 0>
    static void execute(Class &cl, const Extra &...extra) {
        cl.def(
            "__init__",
            [](value_and_holder &v_h, Args... args) {
                v_h.value_ptr()
                    = construct_or_initialize<Alias<Class>>(std::forward<Args>(args)...);
            },
            is_new_style_constructor(),
            extra...);
    }
};

// Implementing class for py::init_alias<...>()
template <typename... Args>
struct alias_constructor {
    template <
        typename Class,
        typename... Extra,
        enable_if_t<Class::has_alias && std::is_constructible<Alias<Class>, Args...>::value, int>
        = 0>
    static void execute(Class &cl, const Extra &...extra) {
        cl.def(
            "__init__",
            [](value_and_holder &v_h, Args... args) {
                v_h.value_ptr()
                    = construct_or_initialize<Alias<Class>>(std::forward<Args>(args)...);
            },
            is_new_style_constructor(),
            extra...);
    }
};

// Implementation class for py::init(Func) and py::init(Func, AliasFunc)
template <typename CFunc,
          typename AFunc = void_type (*)(),
          typename = function_signature_t<CFunc>,
          typename = function_signature_t<AFunc>>
struct factory;

// Specialization for py::init(Func)
template <typename Func, typename Return, typename... Args>
struct factory<Func, void_type (*)(), Return(Args...)> {
    remove_reference_t<Func> class_factory;

    // NOLINTNEXTLINE(google-explicit-constructor)
    factory(Func &&f) : class_factory(std::forward<Func>(f)) {}

    // The given class either has no alias or has no separate alias factory;
    // this always constructs the class itself. If the class is registered with an alias
    // type and an alias instance is needed (i.e. because the final type is a Python class
    // inheriting from the C++ type) the returned value needs to either already be an alias
    // instance, or the alias needs to be constructible from a `Class &&` argument.
    template <typename Class, typename... Extra>
    void execute(Class &cl, const Extra &...extra) && {
#if defined(PYBIND11_CPP14)
        cl.def(
            "__init__",
            [func = std::move(class_factory)]
#else
        auto &func = class_factory;
        cl.def(
            "__init__",
            [func]
#endif
            (value_and_holder &v_h, Args... args) {
                construct<Class>(
                    v_h, func(std::forward<Args>(args)...), Py_TYPE(v_h.inst) != v_h.type->type);
            },
            is_new_style_constructor(),
            extra...);
    }
};

// Specialization for py::init(Func, AliasFunc)
template <typename CFunc,
          typename AFunc,
          typename CReturn,
          typename... CArgs,
          typename AReturn,
          typename... AArgs>
struct factory<CFunc, AFunc, CReturn(CArgs...), AReturn(AArgs...)> {
    static_assert(sizeof...(CArgs) == sizeof...(AArgs),
                  "pybind11::init(class_factory, alias_factory): class and alias factories "
                  "must have identical argument signatures");
    static_assert(all_of<std::is_same<CArgs, AArgs>...>::value,
                  "pybind11::init(class_factory, alias_factory): class and alias factories "
                  "must have identical argument signatures");

    remove_reference_t<CFunc> class_factory;
    remove_reference_t<AFunc> alias_factory;

    factory(CFunc &&c, AFunc &&a)
        : class_factory(std::forward<CFunc>(c)), alias_factory(std::forward<AFunc>(a)) {}

    // The class factory is called when the `self` type passed to `__init__` is the direct
    // class (i.e. not inherited), the alias factory when `self` is a Python-side subtype.
    template <typename Class, typename... Extra>
    void execute(Class &cl, const Extra &...extra) && {
        static_assert(Class::has_alias,
                      "The two-argument version of `py::init()` can "
                      "only be used if the class has an alias");
#if defined(PYBIND11_CPP14)
        cl.def(
            "__init__",
            [class_func = std::move(class_factory), alias_func = std::move(alias_factory)]
#else
        auto &class_func = class_factory;
        auto &alias_func = alias_factory;
        cl.def(
            "__init__",
            [class_func, alias_func]
#endif
            (value_and_holder &v_h, CArgs... args) {
                if (Py_TYPE(v_h.inst) == v_h.type->type) {
                    // If the instance type equals the registered type we don't have inheritance,
                    // so don't need the alias and can construct using the class function:
                    construct<Class>(v_h, class_func(std::forward<CArgs>(args)...), false);
                } else {
                    construct<Class>(v_h, alias_func(std::forward<CArgs>(args)...), true);
                }
            },
            is_new_style_constructor(),
            extra...);
    }
};

/// Set just the C++ state. Same as `__init__`.
template <typename Class, typename T>
void setstate(value_and_holder &v_h, T &&result, bool need_alias) {
    construct<Class>(v_h, std::forward<T>(result), need_alias);
}

/// Set both the C++ and Python states
template <typename Class,
          typename T,
          typename O,
          enable_if_t<std::is_convertible<O, handle>::value, int> = 0>
void setstate(value_and_holder &v_h, std::pair<T, O> &&result, bool need_alias) {
    construct<Class>(v_h, std::move(result.first), need_alias);
    auto d = handle(result.second);
    if (PyDict_Check(d.ptr()) && PyDict_Size(d.ptr()) == 0) {
        // Skipping setattr below, to not force use of py::dynamic_attr() for Class unnecessarily.
        // See PR #2972 for details.
        return;
    }
    setattr((PyObject *) v_h.inst, "__dict__", d);
}

/// Implementation for py::pickle(GetState, SetState)
template <typename Get,
          typename Set,
          typename = function_signature_t<Get>,
          typename = function_signature_t<Set>>
struct pickle_factory;

template <typename Get,
          typename Set,
          typename RetState,
          typename Self,
          typename NewInstance,
          typename ArgState>
struct pickle_factory<Get, Set, RetState(Self), NewInstance(ArgState)> {
    static_assert(std::is_same<intrinsic_t<RetState>, intrinsic_t<ArgState>>::value,
                  "The type returned by `__getstate__` must be the same "
                  "as the argument accepted by `__setstate__`");

    remove_reference_t<Get> get;
    remove_reference_t<Set> set;

    pickle_factory(Get get, Set set) : get(std::forward<Get>(get)), set(std::forward<Set>(set)) {}

    template <typename Class, typename... Extra>
    void execute(Class &cl, const Extra &...extra) && {
        cl.def("__getstate__", std::move(get));

#if defined(PYBIND11_CPP14)
        cl.def(
            "__setstate__",
            [func = std::move(set)]
#else
        auto &func = set;
        cl.def(
            "__setstate__",
            [func]
#endif
            (value_and_holder &v_h, ArgState state) {
                setstate<Class>(
                    v_h, func(std::forward<ArgState>(state)), Py_TYPE(v_h.inst) != v_h.type->type);
            },
            is_new_style_constructor(),
            extra...);
    }
};
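// A minimal usage sketch (illustrative; `Pickleable` is hypothetical): py::pickle routes
// two lambdas through pickle_factory to define __getstate__/__setstate__ on a binding.
// py::class_<Pickleable>(m, "Pickleable")
//     .def(py::init<std::string>())
//     .def(py::pickle(
//         [](const Pickleable &p) { return py::make_tuple(p.value()); },  // __getstate__
//         [](py::tuple t) {                                               // __setstate__
//             return Pickleable(t[0].cast<std::string>());
//         }));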
PYBIND11_NAMESPACE_END(initimpl)
PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
venv/lib/python3.10/site-packages/torch/include/pybind11/detail/internals.h
ADDED
@@ -0,0 +1,667 @@
/*
    pybind11/detail/internals.h: Internal data structure and related functions

    Copyright (c) 2017 Wenzel Jakob <[email protected]>

    All rights reserved. Use of this source code is governed by a
    BSD-style license that can be found in the LICENSE file.
*/

#pragma once

#include "common.h"

#if defined(WITH_THREAD) && defined(PYBIND11_SIMPLE_GIL_MANAGEMENT)
#    include "../gil.h"
#endif

#include "../pytypes.h"

#include <exception>

/// Tracks the `internals` and `type_info` ABI version independent of the main library version.
///
/// Some portions of the code use an ABI that is conditional depending on this
/// version number. That allows ABI-breaking changes to be "pre-implemented".
/// Once the default version number is incremented, the conditional logic that
/// no longer applies can be removed. Additionally, users that need not
/// maintain ABI compatibility can increase the version number in order to take
/// advantage of any functionality/efficiency improvements that depend on the
/// newer ABI.
///
/// WARNING: If you choose to manually increase the ABI version, note that
/// pybind11 may not be tested as thoroughly with a non-default ABI version, and
/// further ABI-incompatible changes may be made before the ABI is officially
/// changed to the new version.
#ifndef PYBIND11_INTERNALS_VERSION
#    if PY_VERSION_HEX >= 0x030C0000 || defined(_MSC_VER)
// Version bump for Python 3.12+, before first 3.12 beta release.
// Version bump for MSVC piggy-backed on PR #4779. See comments there.
#        define PYBIND11_INTERNALS_VERSION 5
#    else
#        define PYBIND11_INTERNALS_VERSION 4
#    endif
#endif

// This requirement is mainly to reduce the support burden (see PR #4570).
static_assert(PY_VERSION_HEX < 0x030C0000 || PYBIND11_INTERNALS_VERSION >= 5,
              "pybind11 ABI version 5 is the minimum for Python 3.12+");
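// A minimal usage sketch (illustrative): because the default above is guarded by
// #ifndef, a project that shares pybind11 objects across several extension modules can
// pin the ABI version explicitly at build time, e.g. with CMake:
//   target_compile_definitions(my_module PRIVATE PYBIND11_INTERNALS_VERSION=5)
// All modules that exchange pybind11 types must then agree on this value.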
50 |
+
PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
|
51 |
+
|
52 |
+
using ExceptionTranslator = void (*)(std::exception_ptr);
|
53 |
+
|
54 |
+
PYBIND11_NAMESPACE_BEGIN(detail)
|
55 |
+
|
56 |
+
constexpr const char *internals_function_record_capsule_name = "pybind11_function_record_capsule";
|
57 |
+
|
58 |
+
// Forward declarations
|
59 |
+
inline PyTypeObject *make_static_property_type();
|
60 |
+
inline PyTypeObject *make_default_metaclass();
|
61 |
+
inline PyObject *make_object_base_type(PyTypeObject *metaclass);
|
62 |
+
|
// The old Python Thread Local Storage (TLS) API is deprecated in Python 3.7 in favor of the new
// Thread Specific Storage (TSS) API.
#if PY_VERSION_HEX >= 0x03070000
// Avoid unnecessary allocation of `Py_tss_t`, since we cannot use
// `Py_LIMITED_API` anyway.
#    if PYBIND11_INTERNALS_VERSION > 4
#        define PYBIND11_TLS_KEY_REF Py_tss_t &
#        if defined(__clang__)
#            define PYBIND11_TLS_KEY_INIT(var) \
                _Pragma("clang diagnostic push") /**/ \
                _Pragma("clang diagnostic ignored \"-Wmissing-field-initializers\"") /**/ \
                Py_tss_t var \
                    = Py_tss_NEEDS_INIT; \
                _Pragma("clang diagnostic pop")
#        elif defined(__GNUC__) && !defined(__INTEL_COMPILER)
#            define PYBIND11_TLS_KEY_INIT(var) \
                _Pragma("GCC diagnostic push") /**/ \
                _Pragma("GCC diagnostic ignored \"-Wmissing-field-initializers\"") /**/ \
                Py_tss_t var \
                    = Py_tss_NEEDS_INIT; \
                _Pragma("GCC diagnostic pop")
#        else
#            define PYBIND11_TLS_KEY_INIT(var) Py_tss_t var = Py_tss_NEEDS_INIT;
#        endif
#        define PYBIND11_TLS_KEY_CREATE(var) (PyThread_tss_create(&(var)) == 0)
#        define PYBIND11_TLS_GET_VALUE(key) PyThread_tss_get(&(key))
#        define PYBIND11_TLS_REPLACE_VALUE(key, value) PyThread_tss_set(&(key), (value))
#        define PYBIND11_TLS_DELETE_VALUE(key) PyThread_tss_set(&(key), nullptr)
#        define PYBIND11_TLS_FREE(key) PyThread_tss_delete(&(key))
#    else
#        define PYBIND11_TLS_KEY_REF Py_tss_t *
#        define PYBIND11_TLS_KEY_INIT(var) Py_tss_t *var = nullptr;
#        define PYBIND11_TLS_KEY_CREATE(var) \
            (((var) = PyThread_tss_alloc()) != nullptr && (PyThread_tss_create((var)) == 0))
#        define PYBIND11_TLS_GET_VALUE(key) PyThread_tss_get((key))
#        define PYBIND11_TLS_REPLACE_VALUE(key, value) PyThread_tss_set((key), (value))
#        define PYBIND11_TLS_DELETE_VALUE(key) PyThread_tss_set((key), nullptr)
#        define PYBIND11_TLS_FREE(key) PyThread_tss_free(key)
#    endif
#else
// Usually an int but a long on Cygwin64 with Python 3.x
#    define PYBIND11_TLS_KEY_REF decltype(PyThread_create_key())
#    define PYBIND11_TLS_KEY_INIT(var) PYBIND11_TLS_KEY_REF var = 0;
#    define PYBIND11_TLS_KEY_CREATE(var) (((var) = PyThread_create_key()) != -1)
#    define PYBIND11_TLS_GET_VALUE(key) PyThread_get_key_value((key))
#    if defined(PYPY_VERSION)
// On CPython < 3.4 and on PyPy, `PyThread_set_key_value` strangely does not set
// the value if it has already been set. Instead, it must first be deleted and
// then set again.
inline void tls_replace_value(PYBIND11_TLS_KEY_REF key, void *value) {
    PyThread_delete_key_value(key);
    PyThread_set_key_value(key, value);
}
#        define PYBIND11_TLS_DELETE_VALUE(key) PyThread_delete_key_value(key)
#        define PYBIND11_TLS_REPLACE_VALUE(key, value) \
            ::pybind11::detail::tls_replace_value((key), (value))
#    else
#        define PYBIND11_TLS_DELETE_VALUE(key) PyThread_set_key_value((key), nullptr)
#        define PYBIND11_TLS_REPLACE_VALUE(key, value) PyThread_set_key_value((key), (value))
#    endif
#    define PYBIND11_TLS_FREE(key) (void) key
#endif
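
// Editor's illustrative sketch (not part of upstream pybind11): regardless of
// which branch defined them, the macros above are meant to compose the same way.
// `key` and `ptr` below are hypothetical names.
//
//     PYBIND11_TLS_KEY_INIT(key)                // declare + zero-initialize a key
//     if (!PYBIND11_TLS_KEY_CREATE(key))        // allocate it; false on failure
//         pybind11_fail("could not create TLS key");
//     PYBIND11_TLS_REPLACE_VALUE(key, ptr);     // per-thread store
//     void *p = PYBIND11_TLS_GET_VALUE(key);    // per-thread load
//     PYBIND11_TLS_DELETE_VALUE(key);           // clear this thread's slot only
//     PYBIND11_TLS_FREE(key);                   // release the key itself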

// Python loads modules by default with dlopen with the RTLD_LOCAL flag; under libc++ and possibly
// other STLs, this means `typeid(A)` from one module won't equal `typeid(A)` from another module
// even when `A` is the same, non-hidden-visibility type (e.g. from a common include). Under
// libstdc++, this doesn't happen: equality and the type_index hash are based on the type name,
// which works. If not under a known-good stl, provide our own name-based hash and equality
// functions that use the type name.
#if (PYBIND11_INTERNALS_VERSION <= 4 && defined(__GLIBCXX__)) \
    || (PYBIND11_INTERNALS_VERSION >= 5 && !defined(_LIBCPP_VERSION))
inline bool same_type(const std::type_info &lhs, const std::type_info &rhs) { return lhs == rhs; }
using type_hash = std::hash<std::type_index>;
using type_equal_to = std::equal_to<std::type_index>;
#else
inline bool same_type(const std::type_info &lhs, const std::type_info &rhs) {
    return lhs.name() == rhs.name() || std::strcmp(lhs.name(), rhs.name()) == 0;
}

struct type_hash {
    size_t operator()(const std::type_index &t) const {
        size_t hash = 5381;
        const char *ptr = t.name();
        while (auto c = static_cast<unsigned char>(*ptr++)) {
            hash = (hash * 33) ^ c;
        }
        return hash;
    }
};

struct type_equal_to {
    bool operator()(const std::type_index &lhs, const std::type_index &rhs) const {
        return lhs.name() == rhs.name() || std::strcmp(lhs.name(), rhs.name()) == 0;
    }
};
#endif
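
// Editor's illustrative sketch (not part of upstream pybind11): the fallback
// `type_hash` above is the classic djb2-xor loop over the mangled type name, so
// two std::type_index values from different shared objects hash (and compare)
// equal whenever their names match, even if their type_info addresses differ.
// `sketch_name_hash` is a hypothetical helper that isolates that loop.
inline size_t sketch_name_hash(const char *name) {
    size_t hash = 5381;
    while (auto c = static_cast<unsigned char>(*name++)) {
        hash = (hash * 33) ^ c; // same recurrence as type_hash in the fallback branch
    }
    return hash;
}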

template <typename value_type>
using type_map = std::unordered_map<std::type_index, value_type, type_hash, type_equal_to>;

struct override_hash {
    inline size_t operator()(const std::pair<const PyObject *, const char *> &v) const {
        size_t value = std::hash<const void *>()(v.first);
        value ^= std::hash<const void *>()(v.second) + 0x9e3779b9 + (value << 6) + (value >> 2);
        return value;
    }
};
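
// Editor's note (illustrative, not upstream): 0x9e3779b9 is the 32-bit
// golden-ratio constant from the well-known boost-style hash_combine recipe;
// together with the shifts it spreads the two pointer hashes so that
// (object, name) pairs differing in only one component land in different
// buckets. A type_map keyed by std::type_index then stays consistent across
// shared objects, e.g.:
//
//     type_map<int> sketch_counts;                         // hypothetical
//     sketch_counts[std::type_index(typeid(int))] += 1;    // name-based bucket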

/// Internal data structure used to track registered instances and types.
/// Whenever binary incompatible changes are made to this structure,
/// `PYBIND11_INTERNALS_VERSION` must be incremented.
struct internals {
    // std::type_index -> pybind11's type information
    type_map<type_info *> registered_types_cpp;
    // PyTypeObject* -> base type_info(s)
    std::unordered_map<PyTypeObject *, std::vector<type_info *>> registered_types_py;
    std::unordered_multimap<const void *, instance *> registered_instances; // void * -> instance*
    std::unordered_set<std::pair<const PyObject *, const char *>, override_hash>
        inactive_override_cache;
    type_map<std::vector<bool (*)(PyObject *, void *&)>> direct_conversions;
    std::unordered_map<const PyObject *, std::vector<PyObject *>> patients;
    std::forward_list<ExceptionTranslator> registered_exception_translators;
    std::unordered_map<std::string, void *> shared_data; // Custom data to be shared across
                                                         // extensions
#if PYBIND11_INTERNALS_VERSION == 4
    std::vector<PyObject *> unused_loader_patient_stack_remove_at_v5;
#endif
    std::forward_list<std::string> static_strings; // Stores the std::strings backing
                                                   // detail::c_str()
    PyTypeObject *static_property_type;
    PyTypeObject *default_metaclass;
    PyObject *instance_base;
#if defined(WITH_THREAD)
    // Unused if PYBIND11_SIMPLE_GIL_MANAGEMENT is defined:
    PYBIND11_TLS_KEY_INIT(tstate)
#    if PYBIND11_INTERNALS_VERSION > 4
    PYBIND11_TLS_KEY_INIT(loader_life_support_tls_key)
#    endif // PYBIND11_INTERNALS_VERSION > 4
    // Unused if PYBIND11_SIMPLE_GIL_MANAGEMENT is defined:
    PyInterpreterState *istate = nullptr;

#    if PYBIND11_INTERNALS_VERSION > 4
    // Note that we have to use a std::string to allocate memory to ensure a unique address
    // We want unique addresses since we use pointer equality to compare function records
    std::string function_record_capsule_name = internals_function_record_capsule_name;
#    endif

    internals() = default;
    internals(const internals &other) = delete;
    internals &operator=(const internals &other) = delete;
    ~internals() {
#    if PYBIND11_INTERNALS_VERSION > 4
        PYBIND11_TLS_FREE(loader_life_support_tls_key);
#    endif // PYBIND11_INTERNALS_VERSION > 4

        // This destructor is called *after* Py_Finalize() in finalize_interpreter().
        // That *SHOULD BE* fine. The following details what happens when PyThread_tss_free is
        // called. PYBIND11_TLS_FREE is PyThread_tss_free on python 3.7+. On older python, it does
        // nothing. PyThread_tss_free calls PyThread_tss_delete and PyMem_RawFree.
        // PyThread_tss_delete just calls TlsFree (on Windows) or pthread_key_delete (on *NIX).
        // Neither of those have anything to do with CPython internals. PyMem_RawFree *requires*
        // that the `tstate` be allocated with the CPython allocator.
        PYBIND11_TLS_FREE(tstate);
    }
#endif
};

/// Additional type information which does not fit into the PyTypeObject.
/// Changes to this struct also require bumping `PYBIND11_INTERNALS_VERSION`.
struct type_info {
    PyTypeObject *type;
    const std::type_info *cpptype;
    size_t type_size, type_align, holder_size_in_ptrs;
    void *(*operator_new)(size_t);
    void (*init_instance)(instance *, const void *);
    void (*dealloc)(value_and_holder &v_h);
    std::vector<PyObject *(*) (PyObject *, PyTypeObject *)> implicit_conversions;
    std::vector<std::pair<const std::type_info *, void *(*) (void *)>> implicit_casts;
    std::vector<bool (*)(PyObject *, void *&)> *direct_conversions;
    buffer_info *(*get_buffer)(PyObject *, void *) = nullptr;
    void *get_buffer_data = nullptr;
    void *(*module_local_load)(PyObject *, const type_info *) = nullptr;
    /* A simple type never occurs as a (direct or indirect) parent
     * of a class that makes use of multiple inheritance.
     * A type can be simple even if it has non-simple ancestors as long as it has no descendants.
     */
    bool simple_type : 1;
    /* True if there is no multiple inheritance in this type's inheritance tree */
    bool simple_ancestors : 1;
    /* for base vs derived holder_type checks */
    bool default_holder : 1;
    /* true if this is a type registered with py::module_local */
    bool module_local : 1;
};

/// On MSVC, debug and release builds are not ABI-compatible!
#if defined(_MSC_VER) && defined(_DEBUG)
#    define PYBIND11_BUILD_TYPE "_debug"
#else
#    define PYBIND11_BUILD_TYPE ""
#endif

/// Let's assume that different compilers are ABI-incompatible.
/// A user can manually set this string if they know their
/// compiler is compatible.
#ifndef PYBIND11_COMPILER_TYPE
#    if defined(_MSC_VER)
#        define PYBIND11_COMPILER_TYPE "_msvc"
#    elif defined(__INTEL_COMPILER)
#        define PYBIND11_COMPILER_TYPE "_icc"
#    elif defined(__clang__)
#        define PYBIND11_COMPILER_TYPE "_clang"
#    elif defined(__PGI)
#        define PYBIND11_COMPILER_TYPE "_pgi"
#    elif defined(__MINGW32__)
#        define PYBIND11_COMPILER_TYPE "_mingw"
#    elif defined(__CYGWIN__)
#        define PYBIND11_COMPILER_TYPE "_gcc_cygwin"
#    elif defined(__GNUC__)
#        define PYBIND11_COMPILER_TYPE "_gcc"
#    else
#        define PYBIND11_COMPILER_TYPE "_unknown"
#    endif
#endif

/// Also standard libs
#ifndef PYBIND11_STDLIB
#    if defined(_LIBCPP_VERSION)
#        define PYBIND11_STDLIB "_libcpp"
#    elif defined(__GLIBCXX__) || defined(__GLIBCPP__)
#        define PYBIND11_STDLIB "_libstdcpp"
#    else
#        define PYBIND11_STDLIB ""
#    endif
#endif

/// On Linux/OSX, changes in __GXX_ABI_VERSION__ indicate ABI incompatibility.
/// On MSVC, changes in _MSC_VER may indicate ABI incompatibility (#2898).
#ifndef PYBIND11_BUILD_ABI
#    if defined(__GXX_ABI_VERSION)
#        define PYBIND11_BUILD_ABI "_cxxabi" PYBIND11_TOSTRING(__GXX_ABI_VERSION)
#    elif defined(_MSC_VER)
#        define PYBIND11_BUILD_ABI "_mscver" PYBIND11_TOSTRING(_MSC_VER)
#    else
#        define PYBIND11_BUILD_ABI ""
#    endif
#endif

#ifndef PYBIND11_INTERNALS_KIND
#    if defined(WITH_THREAD)
#        define PYBIND11_INTERNALS_KIND ""
#    else
#        define PYBIND11_INTERNALS_KIND "_without_thread"
#    endif
#endif

#define PYBIND11_INTERNALS_ID \
    "__pybind11_internals_v" PYBIND11_TOSTRING(PYBIND11_INTERNALS_VERSION) \
        PYBIND11_INTERNALS_KIND PYBIND11_COMPILER_TYPE PYBIND11_STDLIB PYBIND11_BUILD_ABI \
            PYBIND11_BUILD_TYPE "__"

#define PYBIND11_MODULE_LOCAL_ID \
    "__pybind11_module_local_v" PYBIND11_TOSTRING(PYBIND11_INTERNALS_VERSION) \
        PYBIND11_INTERNALS_KIND PYBIND11_COMPILER_TYPE PYBIND11_STDLIB PYBIND11_BUILD_ABI \
            PYBIND11_BUILD_TYPE "__"
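
// Editor's note (illustrative, not upstream): on a typical GCC/libstdc++
// release build the first macro expands to something along the lines of
// "__pybind11_internals_v4_gcc_libstdcpp_cxxabi1016__" (the exact ABI digits
// depend on the toolchain). Every component -- internals version, compiler,
// standard library, C++ ABI, build type -- must match before two extension
// modules will share one `internals` instance.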

/// Each module locally stores a pointer to the `internals` data. The data
/// itself is shared among modules with the same `PYBIND11_INTERNALS_ID`.
inline internals **&get_internals_pp() {
    static internals **internals_pp = nullptr;
    return internals_pp;
}

// forward decl
inline void translate_exception(std::exception_ptr);

template <class T,
          enable_if_t<std::is_same<std::nested_exception, remove_cvref_t<T>>::value, int> = 0>
bool handle_nested_exception(const T &exc, const std::exception_ptr &p) {
    std::exception_ptr nested = exc.nested_ptr();
    if (nested != nullptr && nested != p) {
        translate_exception(nested);
        return true;
    }
    return false;
}

template <class T,
          enable_if_t<!std::is_same<std::nested_exception, remove_cvref_t<T>>::value, int> = 0>
bool handle_nested_exception(const T &exc, const std::exception_ptr &p) {
    if (const auto *nep = dynamic_cast<const std::nested_exception *>(std::addressof(exc))) {
        return handle_nested_exception(*nep, p);
    }
    return false;
}

inline bool raise_err(PyObject *exc_type, const char *msg) {
    if (PyErr_Occurred()) {
        raise_from(exc_type, msg);
        return true;
    }
    set_error(exc_type, msg);
    return false;
}

inline void translate_exception(std::exception_ptr p) {
    if (!p) {
        return;
    }
    try {
        std::rethrow_exception(p);
    } catch (error_already_set &e) {
        handle_nested_exception(e, p);
        e.restore();
        return;
    } catch (const builtin_exception &e) {
        // Could not use template since it's an abstract class.
        if (const auto *nep = dynamic_cast<const std::nested_exception *>(std::addressof(e))) {
            handle_nested_exception(*nep, p);
        }
        e.set_error();
        return;
    } catch (const std::bad_alloc &e) {
        handle_nested_exception(e, p);
        raise_err(PyExc_MemoryError, e.what());
        return;
    } catch (const std::domain_error &e) {
        handle_nested_exception(e, p);
        raise_err(PyExc_ValueError, e.what());
        return;
    } catch (const std::invalid_argument &e) {
        handle_nested_exception(e, p);
        raise_err(PyExc_ValueError, e.what());
        return;
    } catch (const std::length_error &e) {
        handle_nested_exception(e, p);
        raise_err(PyExc_ValueError, e.what());
        return;
    } catch (const std::out_of_range &e) {
        handle_nested_exception(e, p);
        raise_err(PyExc_IndexError, e.what());
        return;
    } catch (const std::range_error &e) {
        handle_nested_exception(e, p);
        raise_err(PyExc_ValueError, e.what());
        return;
    } catch (const std::overflow_error &e) {
        handle_nested_exception(e, p);
        raise_err(PyExc_OverflowError, e.what());
        return;
    } catch (const std::exception &e) {
        handle_nested_exception(e, p);
        raise_err(PyExc_RuntimeError, e.what());
        return;
    } catch (const std::nested_exception &e) {
        handle_nested_exception(e, p);
        raise_err(PyExc_RuntimeError, "Caught an unknown nested exception!");
        return;
    } catch (...) {
        raise_err(PyExc_RuntimeError, "Caught an unknown exception!");
        return;
    }
}
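
// Editor's note (illustrative, not upstream): the net effect of the catch
// chain above is a fixed mapping from standard C++ exceptions to Python ones:
// std::bad_alloc -> MemoryError, std::out_of_range -> IndexError,
// std::overflow_error -> OverflowError, the remaining std::logic_error /
// std::runtime_error family -> ValueError, and any other std::exception ->
// RuntimeError. Nested exceptions are translated first, so Python observes a
// chained traceback, as if `raise ... from ...` had been used.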

#if !defined(__GLIBCXX__)
inline void translate_local_exception(std::exception_ptr p) {
    try {
        if (p) {
            std::rethrow_exception(p);
        }
    } catch (error_already_set &e) {
        e.restore();
        return;
    } catch (const builtin_exception &e) {
        e.set_error();
        return;
    }
}
#endif

inline object get_python_state_dict() {
    object state_dict;
#if PYBIND11_INTERNALS_VERSION <= 4 || PY_VERSION_HEX < 0x03080000 || defined(PYPY_VERSION)
    state_dict = reinterpret_borrow<object>(PyEval_GetBuiltins());
#else
#    if PY_VERSION_HEX < 0x03090000
    PyInterpreterState *istate = _PyInterpreterState_Get();
#    else
    PyInterpreterState *istate = PyInterpreterState_Get();
#    endif
    if (istate) {
        state_dict = reinterpret_borrow<object>(PyInterpreterState_GetDict(istate));
    }
#endif
    if (!state_dict) {
        raise_from(PyExc_SystemError, "pybind11::detail::get_python_state_dict() FAILED");
        throw error_already_set();
    }
    return state_dict;
}

inline object get_internals_obj_from_state_dict(handle state_dict) {
    return reinterpret_borrow<object>(dict_getitemstring(state_dict.ptr(), PYBIND11_INTERNALS_ID));
}

inline internals **get_internals_pp_from_capsule(handle obj) {
    void *raw_ptr = PyCapsule_GetPointer(obj.ptr(), /*name=*/nullptr);
    if (raw_ptr == nullptr) {
        raise_from(PyExc_SystemError, "pybind11::detail::get_internals_pp_from_capsule() FAILED");
        throw error_already_set();
    }
    return static_cast<internals **>(raw_ptr);
}

/// Return a reference to the current `internals` data
PYBIND11_NOINLINE internals &get_internals() {
    auto **&internals_pp = get_internals_pp();
    if (internals_pp && *internals_pp) {
        return **internals_pp;
    }

#if defined(WITH_THREAD)
#    if defined(PYBIND11_SIMPLE_GIL_MANAGEMENT)
    gil_scoped_acquire gil;
#    else
    // Ensure that the GIL is held since we will need to make Python calls.
    // Cannot use py::gil_scoped_acquire here since that constructor calls get_internals.
    struct gil_scoped_acquire_local {
        gil_scoped_acquire_local() : state(PyGILState_Ensure()) {}
        gil_scoped_acquire_local(const gil_scoped_acquire_local &) = delete;
        gil_scoped_acquire_local &operator=(const gil_scoped_acquire_local &) = delete;
        ~gil_scoped_acquire_local() { PyGILState_Release(state); }
        const PyGILState_STATE state;
    } gil;
#    endif
#endif
    error_scope err_scope;

    dict state_dict = get_python_state_dict();
    if (object internals_obj = get_internals_obj_from_state_dict(state_dict)) {
        internals_pp = get_internals_pp_from_capsule(internals_obj);
    }
    if (internals_pp && *internals_pp) {
        // We loaded the internals through `state_dict`, which means that our `error_already_set`
        // and `builtin_exception` may be different local classes than the ones set up in the
        // initial exception translator, below, so add another for our local exception classes.
        //
        // libstdc++ doesn't require this (types there are identified only by name)
        // libc++ with CPython doesn't require this (types are explicitly exported)
        // libc++ with PyPy still needs it, awaiting further investigation
#if !defined(__GLIBCXX__)
        (*internals_pp)->registered_exception_translators.push_front(&translate_local_exception);
#endif
    } else {
        if (!internals_pp) {
            internals_pp = new internals *();
        }
        auto *&internals_ptr = *internals_pp;
        internals_ptr = new internals();
#if defined(WITH_THREAD)

        PyThreadState *tstate = PyThreadState_Get();
        // NOLINTNEXTLINE(bugprone-assignment-in-if-condition)
        if (!PYBIND11_TLS_KEY_CREATE(internals_ptr->tstate)) {
            pybind11_fail("get_internals: could not successfully initialize the tstate TSS key!");
        }
        PYBIND11_TLS_REPLACE_VALUE(internals_ptr->tstate, tstate);

#    if PYBIND11_INTERNALS_VERSION > 4
        // NOLINTNEXTLINE(bugprone-assignment-in-if-condition)
        if (!PYBIND11_TLS_KEY_CREATE(internals_ptr->loader_life_support_tls_key)) {
            pybind11_fail("get_internals: could not successfully initialize the "
                          "loader_life_support TSS key!");
        }
#    endif
        internals_ptr->istate = tstate->interp;
#endif
        state_dict[PYBIND11_INTERNALS_ID] = capsule(internals_pp);
        internals_ptr->registered_exception_translators.push_front(&translate_exception);
        internals_ptr->static_property_type = make_static_property_type();
        internals_ptr->default_metaclass = make_default_metaclass();
        internals_ptr->instance_base = make_object_base_type(internals_ptr->default_metaclass);
    }
    return **internals_pp;
}

// The internals struct (above) is shared between all the modules. local_internals are only
// for a single module. Any changes made to internals may require an update to
// PYBIND11_INTERNALS_VERSION, breaking backwards compatibility. local_internals is, by design,
// restricted to a single module. Whether a module has local internals or not should not
// impact any other modules, because the only thing accessing the local internals is the
// module that contains them.
struct local_internals {
    type_map<type_info *> registered_types_cpp;
    std::forward_list<ExceptionTranslator> registered_exception_translators;
#if defined(WITH_THREAD) && PYBIND11_INTERNALS_VERSION == 4

    // For ABI compatibility, we can't store the loader_life_support TLS key in
    // the `internals` struct directly. Instead, we store it in `shared_data` and
    // cache a copy in `local_internals`. If we allocated a separate TLS key for
    // each instance of `local_internals`, we could end up allocating hundreds of
    // TLS keys if hundreds of different pybind11 modules are loaded (which is a
    // plausible number).
    PYBIND11_TLS_KEY_INIT(loader_life_support_tls_key)

    // Holds the shared TLS key for the loader_life_support stack.
    struct shared_loader_life_support_data {
        PYBIND11_TLS_KEY_INIT(loader_life_support_tls_key)
        shared_loader_life_support_data() {
            // NOLINTNEXTLINE(bugprone-assignment-in-if-condition)
            if (!PYBIND11_TLS_KEY_CREATE(loader_life_support_tls_key)) {
                pybind11_fail("local_internals: could not successfully initialize the "
                              "loader_life_support TLS key!");
            }
        }
        // We can't help but leak the TLS key, because Python never unloads extension modules.
    };

    local_internals() {
        auto &internals = get_internals();
        // Get or create the `loader_life_support_stack_key`.
        auto &ptr = internals.shared_data["_life_support"];
        if (!ptr) {
            ptr = new shared_loader_life_support_data;
        }
        loader_life_support_tls_key
            = static_cast<shared_loader_life_support_data *>(ptr)->loader_life_support_tls_key;
    }
#endif // defined(WITH_THREAD) && PYBIND11_INTERNALS_VERSION == 4
};

/// Works like `get_internals`, but for things which are locally registered.
inline local_internals &get_local_internals() {
    // This static can be created in the interpreter finalization routine. If it were destroyed
    // in another static variable's destructor, re-creating it there would cause a static
    // deinitialization fiasco. To avoid that, we deliberately never destroy the local_internals
    // static. One can read more about the problem and the current solution here:
    // https://google.github.io/styleguide/cppguide.html#Static_and_Global_Variables
    static auto *locals = new local_internals();
    return *locals;
}

/// Constructs a std::string with the given arguments, stores it in `internals`, and returns its
/// `c_str()`. Such string objects have a long storage duration -- the internal strings are only
/// cleared when the program exits or after interpreter shutdown (when embedding), and so are
/// suitable for c-style strings needed by Python internals (such as PyTypeObject's tp_name).
template <typename... Args>
const char *c_str(Args &&...args) {
    auto &strings = get_internals().static_strings;
    strings.emplace_front(std::forward<Args>(args)...);
    return strings.front().c_str();
}
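
// Editor's illustrative sketch (not part of upstream pybind11): a typical call
// site interns a qualified type name for use as a PyTypeObject::tp_name, which
// CPython expects to outlive the type object. The name below is made up.
//
//     const char *tp_name = c_str("mymodule.MyClass"); // backing string lives
//                                                      // in internals.static_strings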

inline const char *get_function_record_capsule_name() {
#if PYBIND11_INTERNALS_VERSION > 4
    return get_internals().function_record_capsule_name.c_str();
#else
    return nullptr;
#endif
}

// Determine whether or not the following capsule contains a pybind11 function record.
// Note that we use `internals` to make sure that only ABI compatible records are touched.
//
// This check is currently used in two places:
// - An important optimization in functional.h to avoid overhead in C++ -> Python -> C++
// - The sibling feature of cpp_function to allow overloads
inline bool is_function_record_capsule(const capsule &cap) {
    // Pointer equality as we rely on internals() to ensure unique pointers
    return cap.name() == get_function_record_capsule_name();
}

PYBIND11_NAMESPACE_END(detail)

/// Returns a named pointer that is shared among all extension modules (using the same
/// pybind11 version) running in the current interpreter. Names starting with underscores
/// are reserved for internal usage. Returns `nullptr` if no matching entry was found.
PYBIND11_NOINLINE void *get_shared_data(const std::string &name) {
    auto &internals = detail::get_internals();
    auto it = internals.shared_data.find(name);
    return it != internals.shared_data.end() ? it->second : nullptr;
}

/// Set the shared data that can be later recovered by `get_shared_data()`.
PYBIND11_NOINLINE void *set_shared_data(const std::string &name, void *data) {
    detail::get_internals().shared_data[name] = data;
    return data;
}

/// Returns a typed reference to a shared data entry (by using `get_shared_data()`) if
/// such entry exists. Otherwise, a new object of default-constructible type `T` is
/// added to the shared data under the given name and a reference to it is returned.
template <typename T>
T &get_or_create_shared_data(const std::string &name) {
    auto &internals = detail::get_internals();
    auto it = internals.shared_data.find(name);
    T *ptr = (T *) (it != internals.shared_data.end() ? it->second : nullptr);
    if (!ptr) {
        ptr = new T();
        internals.shared_data[name] = ptr;
    }
    return *ptr;
}
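
// Editor's illustrative sketch (not part of upstream pybind11): extension
// modules built with the same PYBIND11_INTERNALS_ID can use the functions above
// to share one process-wide object; the `sketch_` names are hypothetical.
inline int &sketch_shared_counter() {
    // Created on first use by whichever module asks first; all others get the
    // same int thereafter.
    return get_or_create_shared_data<int>("sketch_counter");
}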

PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
venv/lib/python3.10/site-packages/torch/include/pybind11/detail/type_caster_base.h
ADDED
@@ -0,0 +1,1218 @@
/*
    pybind11/detail/type_caster_base.h (originally first part of pybind11/cast.h)

    Copyright (c) 2016 Wenzel Jakob <[email protected]>

    All rights reserved. Use of this source code is governed by a
    BSD-style license that can be found in the LICENSE file.
*/

#pragma once

#include "../pytypes.h"
#include "common.h"
#include "descr.h"
#include "internals.h"
#include "typeid.h"

#include <cstdint>
#include <iterator>
#include <new>
#include <string>
#include <type_traits>
#include <typeindex>
#include <typeinfo>
#include <unordered_map>
#include <utility>
#include <vector>

PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
PYBIND11_NAMESPACE_BEGIN(detail)

/// A life support system for temporary objects created by `type_caster::load()`.
/// Adding a patient will keep it alive up until the enclosing function returns.
class loader_life_support {
private:
    loader_life_support *parent = nullptr;
    std::unordered_set<PyObject *> keep_alive;

#if defined(WITH_THREAD)
    // Store stack pointer in thread-local storage.
    static PYBIND11_TLS_KEY_REF get_stack_tls_key() {
#    if PYBIND11_INTERNALS_VERSION == 4
        return get_local_internals().loader_life_support_tls_key;
#    else
        return get_internals().loader_life_support_tls_key;
#    endif
    }
    static loader_life_support *get_stack_top() {
        return static_cast<loader_life_support *>(PYBIND11_TLS_GET_VALUE(get_stack_tls_key()));
    }
    static void set_stack_top(loader_life_support *value) {
        PYBIND11_TLS_REPLACE_VALUE(get_stack_tls_key(), value);
    }
#else
    // Use single global variable for stack.
    static loader_life_support **get_stack_pp() {
        static loader_life_support *global_stack = nullptr;
        return &global_stack;
    }
    static loader_life_support *get_stack_top() { return *get_stack_pp(); }
    static void set_stack_top(loader_life_support *value) { *get_stack_pp() = value; }
#endif

public:
    /// A new patient frame is created when a function is entered
    loader_life_support() : parent{get_stack_top()} { set_stack_top(this); }

    /// ... and destroyed after it returns
    ~loader_life_support() {
        if (get_stack_top() != this) {
            pybind11_fail("loader_life_support: internal error");
        }
        set_stack_top(parent);
        for (auto *item : keep_alive) {
            Py_DECREF(item);
        }
    }

    /// This can only be used inside a pybind11-bound function, either by `argument_loader`
    /// at argument preparation time or by `py::cast()` at execution time.
    PYBIND11_NOINLINE static void add_patient(handle h) {
        loader_life_support *frame = get_stack_top();
        if (!frame) {
            // NOTE: It would be nice to include the stack frames here, as this indicates
            // use of pybind11::cast<> outside the normal call framework; finding such
            // a location is challenging. Developers could consider printing out
            // stack frame addresses here using something like __builtin_frame_address(0)
            throw cast_error("When called outside a bound function, py::cast() cannot "
                             "do Python -> C++ conversions which require the creation "
                             "of temporary values");
        }

        if (frame->keep_alive.insert(h.ptr()).second) {
            Py_INCREF(h.ptr());
        }
    }
};
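
// Editor's illustrative sketch (not part of upstream pybind11): the dispatcher
// uses this class as a plain RAII frame around every bound call, roughly:
//
//     {
//         loader_life_support guard{};  // push a fresh patient frame
//         // ... run argument casters; load() may call add_patient(h) ...
//     }                                 // ~guard pops the frame and
//                                       // Py_DECREFs every patient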

// Gets the cache entry for the given type, creating it if necessary. The return value is the pair
// returned by emplace, i.e. an iterator for the entry and a bool set to `true` if the entry was
// just created.
inline std::pair<decltype(internals::registered_types_py)::iterator, bool>
all_type_info_get_cache(PyTypeObject *type);

// Band-aid workaround to fix a subtle but serious bug in a minimalistic fashion. See PR #4762.
inline void all_type_info_add_base_most_derived_first(std::vector<type_info *> &bases,
                                                      type_info *addl_base) {
    for (auto it = bases.begin(); it != bases.end(); it++) {
        type_info *existing_base = *it;
        if (PyType_IsSubtype(addl_base->type, existing_base->type) != 0) {
            bases.insert(it, addl_base);
            return;
        }
    }
    bases.push_back(addl_base);
}

// Populates a just-created cache entry.
PYBIND11_NOINLINE void all_type_info_populate(PyTypeObject *t, std::vector<type_info *> &bases) {
    assert(bases.empty());
    std::vector<PyTypeObject *> check;
    for (handle parent : reinterpret_borrow<tuple>(t->tp_bases)) {
        check.push_back((PyTypeObject *) parent.ptr());
    }

    auto const &type_dict = get_internals().registered_types_py;
    for (size_t i = 0; i < check.size(); i++) {
        auto *type = check[i];
        // Ignore Python2 old-style class super type:
        if (!PyType_Check((PyObject *) type)) {
            continue;
        }

        // Check `type` in the current set of registered python types:
        auto it = type_dict.find(type);
        if (it != type_dict.end()) {
            // We found a cache entry for it, so it's either pybind-registered or has pre-computed
            // pybind bases, but we have to make sure we haven't already seen the type(s) before:
            // we want to follow Python/virtual C++ rules that there should only be one instance of
            // a common base.
            for (auto *tinfo : it->second) {
                // NB: Could use a second set here, rather than doing a linear search, but since
                // having a large number of immediate pybind11-registered types seems fairly
                // unlikely, that probably isn't worthwhile.
                bool found = false;
                for (auto *known : bases) {
                    if (known == tinfo) {
                        found = true;
                        break;
                    }
                }
                if (!found) {
                    all_type_info_add_base_most_derived_first(bases, tinfo);
                }
            }
        } else if (type->tp_bases) {
            // It's some python type, so keep following its base classes to look for one or more
            // registered types
            if (i + 1 == check.size()) {
                // When we're at the end, we can pop off the current element to avoid growing
                // `check` when adding just one base (which is typical--i.e. when there is no
                // multiple inheritance)
                check.pop_back();
                i--;
            }
            for (handle parent : reinterpret_borrow<tuple>(type->tp_bases)) {
                check.push_back((PyTypeObject *) parent.ptr());
            }
        }
    }
}

/**
 * Extracts vector of type_info pointers of pybind-registered roots of the given Python type. Will
 * be just 1 pybind type for the Python type of a pybind-registered class, or for any Python-side
 * derived class that uses single inheritance. Will contain as many types as required for a Python
 * class that uses multiple inheritance to inherit (directly or indirectly) from multiple
 * pybind-registered classes. Will be empty if neither the type nor any base classes are
 * pybind-registered.
 *
 * The value is cached for the lifetime of the Python type.
 */
inline const std::vector<detail::type_info *> &all_type_info(PyTypeObject *type) {
    auto ins = all_type_info_get_cache(type);
    if (ins.second) {
        // New cache entry: populate it
        all_type_info_populate(type, ins.first->second);
    }

    return ins.first->second;
}

/**
 * Gets a single pybind11 type info for a python type. Returns nullptr if neither the type nor any
 * ancestors are pybind11-registered. Throws an exception if there are multiple bases--use
 * `all_type_info` instead if you want to support multiple bases.
 */
PYBIND11_NOINLINE detail::type_info *get_type_info(PyTypeObject *type) {
    const auto &bases = all_type_info(type);
    if (bases.empty()) {
        return nullptr;
    }
    if (bases.size() > 1) {
        pybind11_fail(
            "pybind11::detail::get_type_info: type has multiple pybind11-registered bases");
    }
    return bases.front();
}

inline detail::type_info *get_local_type_info(const std::type_index &tp) {
    auto &locals = get_local_internals().registered_types_cpp;
    auto it = locals.find(tp);
    if (it != locals.end()) {
        return it->second;
    }
    return nullptr;
}

inline detail::type_info *get_global_type_info(const std::type_index &tp) {
    auto &types = get_internals().registered_types_cpp;
    auto it = types.find(tp);
    if (it != types.end()) {
        return it->second;
    }
    return nullptr;
}

/// Return the type info for a given C++ type; on lookup failure can either throw or return
/// nullptr.
PYBIND11_NOINLINE detail::type_info *get_type_info(const std::type_index &tp,
                                                   bool throw_if_missing = false) {
    if (auto *ltype = get_local_type_info(tp)) {
        return ltype;
    }
    if (auto *gtype = get_global_type_info(tp)) {
        return gtype;
    }

    if (throw_if_missing) {
        std::string tname = tp.name();
        detail::clean_type_id(tname);
        pybind11_fail("pybind11::detail::get_type_info: unable to find type info for \""
                      + std::move(tname) + '"');
    }
    return nullptr;
}

PYBIND11_NOINLINE handle get_type_handle(const std::type_info &tp, bool throw_if_missing) {
    detail::type_info *type_info = get_type_info(tp, throw_if_missing);
    return handle(type_info ? ((PyObject *) type_info->type) : nullptr);
}
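
// Editor's illustrative sketch (not part of upstream pybind11): a non-throwing
// "is this C++ type bound anywhere?" probe built from the lookup helpers above;
// the `sketch_` name is hypothetical.
inline bool sketch_is_registered(const std::type_info &tp) {
    // Checks module-local registrations first, then the shared table, and
    // never throws because throw_if_missing defaults to false.
    return get_type_info(std::type_index(tp)) != nullptr;
}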

// Searches the inheritance graph for a registered Python instance, using all_type_info().
PYBIND11_NOINLINE handle find_registered_python_instance(void *src,
                                                         const detail::type_info *tinfo) {
    auto it_instances = get_internals().registered_instances.equal_range(src);
    for (auto it_i = it_instances.first; it_i != it_instances.second; ++it_i) {
        for (auto *instance_type : detail::all_type_info(Py_TYPE(it_i->second))) {
            if (instance_type && same_type(*instance_type->cpptype, *tinfo->cpptype)) {
                return handle((PyObject *) it_i->second).inc_ref();
            }
        }
    }
    return handle();
}

struct value_and_holder {
    instance *inst = nullptr;
    size_t index = 0u;
    const detail::type_info *type = nullptr;
    void **vh = nullptr;

    // Main constructor for a found value/holder:
    value_and_holder(instance *i, const detail::type_info *type, size_t vpos, size_t index)
        : inst{i}, index{index}, type{type},
          vh{inst->simple_layout ? inst->simple_value_holder
                                 : &inst->nonsimple.values_and_holders[vpos]} {}

    // Default constructor (used to signal a value-and-holder not found by get_value_and_holder())
    value_and_holder() = default;

    // Used for past-the-end iterator
    explicit value_and_holder(size_t index) : index{index} {}

    template <typename V = void>
    V *&value_ptr() const {
        return reinterpret_cast<V *&>(vh[0]);
    }
    // True if this `value_and_holder` has a non-null value pointer
    explicit operator bool() const { return value_ptr() != nullptr; }

    template <typename H>
    H &holder() const {
        return reinterpret_cast<H &>(vh[1]);
    }
    bool holder_constructed() const {
        return inst->simple_layout
                   ? inst->simple_holder_constructed
                   : (inst->nonsimple.status[index] & instance::status_holder_constructed) != 0u;
    }
    // NOLINTNEXTLINE(readability-make-member-function-const)
    void set_holder_constructed(bool v = true) {
        if (inst->simple_layout) {
            inst->simple_holder_constructed = v;
        } else if (v) {
            inst->nonsimple.status[index] |= instance::status_holder_constructed;
        } else {
            inst->nonsimple.status[index] &= (std::uint8_t) ~instance::status_holder_constructed;
        }
    }
    bool instance_registered() const {
        return inst->simple_layout
                   ? inst->simple_instance_registered
                   : ((inst->nonsimple.status[index] & instance::status_instance_registered) != 0);
    }
    // NOLINTNEXTLINE(readability-make-member-function-const)
    void set_instance_registered(bool v = true) {
        if (inst->simple_layout) {
            inst->simple_instance_registered = v;
        } else if (v) {
            inst->nonsimple.status[index] |= instance::status_instance_registered;
        } else {
            inst->nonsimple.status[index] &= (std::uint8_t) ~instance::status_instance_registered;
        }
    }
};

// Container for accessing and iterating over an instance's values/holders
struct values_and_holders {
private:
    instance *inst;
    using type_vec = std::vector<detail::type_info *>;
    const type_vec &tinfo;

public:
    explicit values_and_holders(instance *inst)
        : inst{inst}, tinfo(all_type_info(Py_TYPE(inst))) {}

    explicit values_and_holders(PyObject *obj)
        : inst{nullptr}, tinfo(all_type_info(Py_TYPE(obj))) {
        if (!tinfo.empty()) {
            inst = reinterpret_cast<instance *>(obj);
        }
    }

    struct iterator {
    private:
        instance *inst = nullptr;
        const type_vec *types = nullptr;
        value_and_holder curr;
        friend struct values_and_holders;
        iterator(instance *inst, const type_vec *tinfo) : inst{inst}, types{tinfo} {
            if (inst != nullptr) {
                assert(!types->empty());
                curr = value_and_holder(
                    inst /* instance */,
                    (*types)[0] /* type info */,
                    0, /* vpos: (non-simple types only): the first vptr comes first */
                    0 /* index */);
            }
        }
        // Past-the-end iterator:
        explicit iterator(size_t end) : curr(end) {}

    public:
        bool operator==(const iterator &other) const { return curr.index == other.curr.index; }
        bool operator!=(const iterator &other) const { return curr.index != other.curr.index; }
        iterator &operator++() {
            if (!inst->simple_layout) {
                curr.vh += 1 + (*types)[curr.index]->holder_size_in_ptrs;
            }
            ++curr.index;
            curr.type = curr.index < types->size() ? (*types)[curr.index] : nullptr;
            return *this;
        }
        value_and_holder &operator*() { return curr; }
        value_and_holder *operator->() { return &curr; }
    };

    iterator begin() { return iterator(inst, &tinfo); }
    iterator end() { return iterator(tinfo.size()); }

    iterator find(const type_info *find_type) {
        auto it = begin(), endit = end();
        while (it != endit && it->type != find_type) {
            ++it;
        }
        return it;
    }

    size_t size() { return tinfo.size(); }

    // Band-aid workaround to fix a subtle but serious bug in a minimalistic fashion. See PR #4762.
    bool is_redundant_value_and_holder(const value_and_holder &vh) {
        for (size_t i = 0; i < vh.index; i++) {
            if (PyType_IsSubtype(tinfo[i]->type, tinfo[vh.index]->type) != 0) {
                return true;
            }
        }
        return false;
    }
};
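
// Editor's illustrative sketch (not part of upstream pybind11): the canonical
// traversal of an instance's value/holder slots, roughly as the deallocation
// machinery walks them; `inst` is a hypothetical `instance *`.
//
//     for (auto &v_h : values_and_holders(inst)) {
//         if (v_h.holder_constructed())
//             v_h.type->dealloc(v_h);  // per-base cleanup
//     }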

/**
 * Extracts C++ value and holder pointer references from an instance (which may contain multiple
 * values/holders for python-side multiple inheritance) that match the given type. Throws an error
 * if the given type (or ValueType, if omitted) is not a pybind11 base of the given instance. If
 * `find_type` is omitted (or explicitly specified as nullptr) the first value/holder are returned,
 * regardless of type (and the resulting .type will be nullptr).
 *
 * The returned object should be short-lived: in particular, it must not outlive the called-upon
 * instance.
 */
PYBIND11_NOINLINE value_and_holder
instance::get_value_and_holder(const type_info *find_type /*= nullptr default in common.h*/,
                               bool throw_if_missing /*= true in common.h*/) {
    // Optimize common case:
    if (!find_type || Py_TYPE(this) == find_type->type) {
        return value_and_holder(this, find_type, 0, 0);
    }

    detail::values_and_holders vhs(this);
    auto it = vhs.find(find_type);
    if (it != vhs.end()) {
        return *it;
    }

    if (!throw_if_missing) {
        return value_and_holder();
    }

#if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
    pybind11_fail("pybind11::detail::instance::get_value_and_holder: `"
                  + get_fully_qualified_tp_name(find_type->type)
                  + "' is not a pybind11 base of the given `"
                  + get_fully_qualified_tp_name(Py_TYPE(this)) + "' instance");
#else
    pybind11_fail(
        "pybind11::detail::instance::get_value_and_holder: "
        "type is not a pybind11 base of the given instance "
        "(#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for type details)");
#endif
}

PYBIND11_NOINLINE void instance::allocate_layout() {
    const auto &tinfo = all_type_info(Py_TYPE(this));

    const size_t n_types = tinfo.size();

    if (n_types == 0) {
        pybind11_fail(
            "instance allocation failed: new instance has no pybind11-registered base types");
    }

    simple_layout
        = n_types == 1 && tinfo.front()->holder_size_in_ptrs <= instance_simple_holder_in_ptrs();

    // Simple path: no python-side multiple inheritance, and a small-enough holder
    if (simple_layout) {
        simple_value_holder[0] = nullptr;
        simple_holder_constructed = false;
        simple_instance_registered = false;
    } else { // multiple base types or a too-large holder
        // Allocate space to hold: [v1*][h1][v2*][h2]...[bb...] where [vN*] is a value pointer,
        // [hN] is the (uninitialized) holder instance for value N, and [bb...] is a set of bool
        // values that tracks whether each associated holder has been initialized. Each [block] is
        // padded, if necessary, to an integer multiple of sizeof(void *).
        size_t space = 0;
        for (auto *t : tinfo) {
            space += 1;                      // value pointer
            space += t->holder_size_in_ptrs; // holder instance
        }
        size_t flags_at = space;
        space += size_in_ptrs(n_types); // status bytes (holder_constructed and
                                        // instance_registered)

        // Allocate space for flags, values, and holders, and initialize it to 0 (flags and values,
        // in particular, need to be 0). Use Python's memory allocation
        // functions: Python is using pymalloc, which is designed to be
        // efficient for small allocations like the one we're doing here;
        // for larger allocations they are just wrappers around malloc.
        // TODO: is this still true for pure Python 3.6?
        nonsimple.values_and_holders = (void **) PyMem_Calloc(space, sizeof(void *));
        if (!nonsimple.values_and_holders) {
            throw std::bad_alloc();
        }
        nonsimple.status
            = reinterpret_cast<std::uint8_t *>(&nonsimple.values_and_holders[flags_at]);
    }
    owned = true;
}

// NOLINTNEXTLINE(readability-make-member-function-const)
PYBIND11_NOINLINE void instance::deallocate_layout() {
    if (!simple_layout) {
        PyMem_Free(nonsimple.values_and_holders);
    }
}
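
// Editor's worked example (illustrative, not upstream): on a typical 64-bit
// build, a non-simple instance with two bases whose holders are each
// shared_ptr-sized (2 pointers) needs
//     2 * (1 value ptr + 2 holder ptrs) + size_in_ptrs(2 status bytes)
//   = 6 + 1 = 7 pointer-sized slots,
// with the two status bytes packed into the final slot.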

PYBIND11_NOINLINE bool isinstance_generic(handle obj, const std::type_info &tp) {
    handle type = detail::get_type_handle(tp, false);
    if (!type) {
        return false;
    }
    return isinstance(obj, type);
}

PYBIND11_NOINLINE handle get_object_handle(const void *ptr, const detail::type_info *type) {
    auto &instances = get_internals().registered_instances;
    auto range = instances.equal_range(ptr);
    for (auto it = range.first; it != range.second; ++it) {
        for (const auto &vh : values_and_holders(it->second)) {
            if (vh.type == type) {
                return handle((PyObject *) it->second);
            }
        }
    }
    return handle();
}

inline PyThreadState *get_thread_state_unchecked() {
#if defined(PYPY_VERSION)
    return PyThreadState_GET();
#elif PY_VERSION_HEX < 0x030D0000
    return _PyThreadState_UncheckedGet();
#else
    return PyThreadState_GetUnchecked();
#endif
}

// Forward declarations
void keep_alive_impl(handle nurse, handle patient);
inline PyObject *make_new_instance(PyTypeObject *type);
+
class type_caster_generic {
|
536 |
+
public:
|
537 |
+
PYBIND11_NOINLINE explicit type_caster_generic(const std::type_info &type_info)
|
538 |
+
: typeinfo(get_type_info(type_info)), cpptype(&type_info) {}
|
539 |
+
|
540 |
+
explicit type_caster_generic(const type_info *typeinfo)
|
541 |
+
: typeinfo(typeinfo), cpptype(typeinfo ? typeinfo->cpptype : nullptr) {}
|
542 |
+
|
543 |
+
bool load(handle src, bool convert) { return load_impl<type_caster_generic>(src, convert); }
|
544 |
+
|
545 |
+
PYBIND11_NOINLINE static handle cast(const void *_src,
|
546 |
+
return_value_policy policy,
|
547 |
+
handle parent,
|
548 |
+
const detail::type_info *tinfo,
|
549 |
+
void *(*copy_constructor)(const void *),
|
550 |
+
void *(*move_constructor)(const void *),
|
551 |
+
const void *existing_holder = nullptr) {
|
552 |
+
if (!tinfo) { // no type info: error will be set already
|
553 |
+
return handle();
|
554 |
+
}
|
555 |
+
|
556 |
+
void *src = const_cast<void *>(_src);
|
557 |
+
if (src == nullptr) {
|
558 |
+
return none().release();
|
559 |
+
}
|
560 |
+
|
561 |
+
if (handle registered_inst = find_registered_python_instance(src, tinfo)) {
|
562 |
+
return registered_inst;
|
563 |
+
}
|
564 |
+
|
565 |
+
auto inst = reinterpret_steal<object>(make_new_instance(tinfo->type));
|
566 |
+
auto *wrapper = reinterpret_cast<instance *>(inst.ptr());
|
567 |
+
wrapper->owned = false;
|
568 |
+
void *&valueptr = values_and_holders(wrapper).begin()->value_ptr();
|
569 |
+
|
570 |
+
switch (policy) {
|
571 |
+
case return_value_policy::automatic:
|
572 |
+
case return_value_policy::take_ownership:
|
573 |
+
valueptr = src;
|
574 |
+
wrapper->owned = true;
|
575 |
+
break;
|
576 |
+
|
577 |
+
case return_value_policy::automatic_reference:
|
578 |
+
case return_value_policy::reference:
|
579 |
+
valueptr = src;
|
580 |
+
wrapper->owned = false;
|
581 |
+
break;
|
582 |
+
|
583 |
+
case return_value_policy::copy:
|
584 |
+
if (copy_constructor) {
|
585 |
+
valueptr = copy_constructor(src);
|
586 |
+
} else {
|
587 |
+
#if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
|
588 |
+
std::string type_name(tinfo->cpptype->name());
|
589 |
+
detail::clean_type_id(type_name);
|
590 |
+
throw cast_error("return_value_policy = copy, but type " + type_name
|
591 |
+
+ " is non-copyable!");
|
592 |
+
#else
|
593 |
+
throw cast_error("return_value_policy = copy, but type is "
|
594 |
+
"non-copyable! (#define PYBIND11_DETAILED_ERROR_MESSAGES or "
|
595 |
+
"compile in debug mode for details)");
|
596 |
+
#endif
|
597 |
+
}
|
598 |
+
wrapper->owned = true;
|
599 |
+
break;
|
600 |
+
|
601 |
+
case return_value_policy::move:
|
602 |
+
if (move_constructor) {
|
603 |
+
valueptr = move_constructor(src);
|
604 |
+
} else if (copy_constructor) {
|
605 |
+
valueptr = copy_constructor(src);
|
606 |
+
} else {
|
607 |
+
#if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
|
608 |
+
std::string type_name(tinfo->cpptype->name());
|
609 |
+
detail::clean_type_id(type_name);
|
610 |
+
throw cast_error("return_value_policy = move, but type " + type_name
|
611 |
+
+ " is neither movable nor copyable!");
|
612 |
+
#else
|
613 |
+
throw cast_error("return_value_policy = move, but type is neither "
|
614 |
+
"movable nor copyable! "
|
615 |
+
"(#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in "
|
616 |
+
"debug mode for details)");
|
617 |
+
#endif
|
618 |
+
}
|
619 |
+
wrapper->owned = true;
|
620 |
+
break;
|
621 |
+
|
622 |
+
case return_value_policy::reference_internal:
|
623 |
+
valueptr = src;
|
624 |
+
wrapper->owned = false;
|
625 |
+
keep_alive_impl(inst, parent);
|
626 |
+
break;
|
627 |
+
|
628 |
+
default:
|
629 |
+
throw cast_error("unhandled return_value_policy: should not happen!");
|
630 |
+
}
|
631 |
+
|
632 |
+
tinfo->init_instance(wrapper, existing_holder);
|
633 |
+
|
634 |
+
return inst.release();
|
635 |
+
}
|
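
    // How the policy branches above are typically reached from binding code
    // (illustrative annotation; `Widget`, `make_widget`, `peek_widget`, and
    // `copy_widget` are hypothetical names, not part of pybind11):
    //
    //     m.def("make_widget", &make_widget,
    //           py::return_value_policy::take_ownership); // wrapper->owned = true
    //     m.def("peek_widget", &peek_widget,
    //           py::return_value_policy::reference);      // wrapper->owned = false
    //     m.def("copy_widget", &copy_widget,
    //           py::return_value_policy::copy);           // needs a copy constructor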

    // Base methods for generic caster; these are overridden in copyable_holder_caster
    void load_value(value_and_holder &&v_h) {
        auto *&vptr = v_h.value_ptr();
        // Lazy allocation for unallocated values:
        if (vptr == nullptr) {
            const auto *type = v_h.type ? v_h.type : typeinfo;
            if (type->operator_new) {
                vptr = type->operator_new(type->type_size);
            } else {
#if defined(__cpp_aligned_new) && (!defined(_MSC_VER) || _MSC_VER >= 1912)
                if (type->type_align > __STDCPP_DEFAULT_NEW_ALIGNMENT__) {
                    vptr = ::operator new(type->type_size, std::align_val_t(type->type_align));
                } else {
                    vptr = ::operator new(type->type_size);
                }
#else
                vptr = ::operator new(type->type_size);
#endif
            }
        }
        value = vptr;
    }
    bool try_implicit_casts(handle src, bool convert) {
        for (const auto &cast : typeinfo->implicit_casts) {
            type_caster_generic sub_caster(*cast.first);
            if (sub_caster.load(src, convert)) {
                value = cast.second(sub_caster.value);
                return true;
            }
        }
        return false;
    }
    bool try_direct_conversions(handle src) {
        for (auto &converter : *typeinfo->direct_conversions) {
            if (converter(src.ptr(), value)) {
                return true;
            }
        }
        return false;
    }
    void check_holder_compat() {}

    PYBIND11_NOINLINE static void *local_load(PyObject *src, const type_info *ti) {
        auto caster = type_caster_generic(ti);
        if (caster.load(src, false)) {
            return caster.value;
        }
        return nullptr;
    }

    /// Try to load with foreign typeinfo, if available. Used when there is no
    /// native typeinfo, or when the native one wasn't able to produce a value.
    PYBIND11_NOINLINE bool try_load_foreign_module_local(handle src) {
        constexpr auto *local_key = PYBIND11_MODULE_LOCAL_ID;
        const auto pytype = type::handle_of(src);
        if (!hasattr(pytype, local_key)) {
            return false;
        }

        type_info *foreign_typeinfo = reinterpret_borrow<capsule>(getattr(pytype, local_key));
        // Only consider this foreign loader if it is actually foreign and is a loader of the
        // correct cpp type
        if (foreign_typeinfo->module_local_load == &local_load
            || (cpptype && !same_type(*cpptype, *foreign_typeinfo->cpptype))) {
            return false;
        }

        if (auto *result = foreign_typeinfo->module_local_load(src.ptr(), foreign_typeinfo)) {
            value = result;
            return true;
        }
        return false;
    }

    // Implementation of `load`; this takes the type of `this` so that it can dispatch the relevant
    // bits of code between here and copyable_holder_caster where the two classes need different
    // logic (without having to resort to virtual inheritance).
    template <typename ThisT>
    PYBIND11_NOINLINE bool load_impl(handle src, bool convert) {
        if (!src) {
            return false;
        }
        if (!typeinfo) {
            return try_load_foreign_module_local(src);
        }

        auto &this_ = static_cast<ThisT &>(*this);
        this_.check_holder_compat();

        PyTypeObject *srctype = Py_TYPE(src.ptr());

        // Case 1: If src is an exact type match for the target type then we can reinterpret_cast
        // the instance's value pointer to the target type:
        if (srctype == typeinfo->type) {
            this_.load_value(reinterpret_cast<instance *>(src.ptr())->get_value_and_holder());
            return true;
        }
        // Case 2: We have a derived class
        if (PyType_IsSubtype(srctype, typeinfo->type)) {
            const auto &bases = all_type_info(srctype);
            bool no_cpp_mi = typeinfo->simple_type;

            // Case 2a: the python type is a Python-inherited derived class that inherits from just
            // one simple (no MI) pybind11 class, or is an exact match, so the C++ instance is of
            // the right type and we can use reinterpret_cast.
            // (This is essentially the same as case 2b, but because not using multiple inheritance
            // is extremely common, we handle it specially to avoid the loop iterator and type
            // pointer lookup overhead)
            if (bases.size() == 1 && (no_cpp_mi || bases.front()->type == typeinfo->type)) {
                this_.load_value(reinterpret_cast<instance *>(src.ptr())->get_value_and_holder());
                return true;
            }
            // Case 2b: the python type inherits from multiple C++ bases. Check the bases to see
            // if we can find an exact match (or, for a simple C++ type, an inherited match); if
            // so, we can safely reinterpret_cast to the relevant pointer.
            if (bases.size() > 1) {
                for (auto *base : bases) {
                    if (no_cpp_mi ? PyType_IsSubtype(base->type, typeinfo->type)
                                  : base->type == typeinfo->type) {
                        this_.load_value(
                            reinterpret_cast<instance *>(src.ptr())->get_value_and_holder(base));
                        return true;
                    }
                }
            }

            // Case 2c: C++ multiple inheritance is involved and we couldn't find an exact type
            // match in the registered bases, above, so try implicit casting (needed for proper C++
            // casting when MI is involved).
            if (this_.try_implicit_casts(src, convert)) {
                return true;
            }
        }

        // Perform an implicit conversion
        if (convert) {
            for (const auto &converter : typeinfo->implicit_conversions) {
                auto temp = reinterpret_steal<object>(converter(src.ptr(), typeinfo->type));
                if (load_impl<ThisT>(temp, false)) {
                    loader_life_support::add_patient(temp);
                    return true;
                }
            }
            if (this_.try_direct_conversions(src)) {
                return true;
            }
        }

        // Failed to match local typeinfo. Try again with global.
        if (typeinfo->module_local) {
            if (auto *gtype = get_global_type_info(*typeinfo->cpptype)) {
                typeinfo = gtype;
                return load(src, false);
            }
        }

        // Global typeinfo has precedence over foreign module_local
        if (try_load_foreign_module_local(src)) {
            return true;
        }

        // Custom converters didn't take None, now we convert None to nullptr.
        if (src.is_none()) {
            // Defer accepting None to other overloads (if we aren't in convert mode):
            if (!convert) {
                return false;
            }
            value = nullptr;
            return true;
        }

        return false;
    }
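
    // Sketch of how the cases above fire (illustrative annotation; the class names are
    // hypothetical):
    //  - Case 1: a Python object whose type is exactly py::class_<Base>'s type.
    //  - Case 2a: `class PyDerived(Base): ...` defined in Python; it adds no further
    //    C++ bases, so the single C++ value pointer can be used directly.
    //  - Case 2b: `class Py(CppA, CppB): ...` combining two pybind11-registered bases;
    //    the loop picks the value_and_holder belonging to the requested base.
    //  - Case 2c: py::class_<Derived, BaseA, BaseB> with C++ multiple inheritance,
    //    where loading BaseB may require a pointer adjustment performed by the
    //    registered implicit casts.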

    // Called to do type lookup and wrap the pointer and type in a pair when a dynamic_cast
    // isn't needed or can't be used. If the type is unknown, sets the error and returns a pair
    // with .second = nullptr. (p.first = nullptr is not an error: it becomes None).
    PYBIND11_NOINLINE static std::pair<const void *, const type_info *>
    src_and_type(const void *src,
                 const std::type_info &cast_type,
                 const std::type_info *rtti_type = nullptr) {
        if (auto *tpi = get_type_info(cast_type)) {
            return {src, const_cast<const type_info *>(tpi)};
        }

        // Not found, set error:
        std::string tname = rtti_type ? rtti_type->name() : cast_type.name();
        detail::clean_type_id(tname);
        std::string msg = "Unregistered type : " + tname;
        set_error(PyExc_TypeError, msg.c_str());
        return {nullptr, nullptr};
    }

    const type_info *typeinfo = nullptr;
    const std::type_info *cpptype = nullptr;
    void *value = nullptr;
};

/**
 * Determine suitable casting operator for pointer-or-lvalue-casting type casters. The type caster
 * needs to provide `operator T*()` and `operator T&()` operators.
 *
 * If the type supports moving the value away via an `operator T&&() &&` method, it should use
 * `movable_cast_op_type` instead.
 */
template <typename T>
using cast_op_type = conditional_t<std::is_pointer<remove_reference_t<T>>::value,
                                   typename std::add_pointer<intrinsic_t<T>>::type,
                                   typename std::add_lvalue_reference<intrinsic_t<T>>::type>;

/**
 * Determine suitable casting operator for a type caster with a movable value. Such a type caster
 * needs to provide `operator T*()`, `operator T&()`, and `operator T&&() &&`. The latter will be
 * called in appropriate contexts where the value can be moved rather than copied.
 *
 * These operators are automatically provided when using the PYBIND11_TYPE_CASTER macro.
 */
template <typename T>
using movable_cast_op_type
    = conditional_t<std::is_pointer<typename std::remove_reference<T>::type>::value,
                    typename std::add_pointer<intrinsic_t<T>>::type,
                    conditional_t<std::is_rvalue_reference<T>::value,
                                  typename std::add_rvalue_reference<intrinsic_t<T>>::type,
                                  typename std::add_lvalue_reference<intrinsic_t<T>>::type>>;

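// Results of the two aliases above for a hypothetical type `Foo` (illustrative
// annotation):
//
//     cast_op_type<Foo *>          -> Foo *   (pointer arguments stay pointers)
//     cast_op_type<const Foo &>    -> Foo &   (everything else becomes an lvalue ref)
//     movable_cast_op_type<Foo &&> -> Foo &&  (rvalue refs are preserved so that the
//                                              caster's `operator T&&() &&` is selected)
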
// Does the container have a mapped type and is it recursive?
// Implemented by specializations below.
template <typename Container, typename SFINAE = void>
struct container_mapped_type_traits {
    static constexpr bool has_mapped_type = false;
    static constexpr bool has_recursive_mapped_type = false;
};

template <typename Container>
struct container_mapped_type_traits<
    Container,
    typename std::enable_if<
        std::is_same<typename Container::mapped_type, Container>::value>::type> {
    static constexpr bool has_mapped_type = true;
    static constexpr bool has_recursive_mapped_type = true;
};

template <typename Container>
struct container_mapped_type_traits<
    Container,
    typename std::enable_if<
        negation<std::is_same<typename Container::mapped_type, Container>>::value>::type> {
    static constexpr bool has_mapped_type = true;
    static constexpr bool has_recursive_mapped_type = false;
};

// Does the container have a value type and is it recursive?
// Implemented by specializations below.
template <typename Container, typename SFINAE = void>
struct container_value_type_traits : std::false_type {
    static constexpr bool has_value_type = false;
    static constexpr bool has_recursive_value_type = false;
};

template <typename Container>
struct container_value_type_traits<
    Container,
    typename std::enable_if<
        std::is_same<typename Container::value_type, Container>::value>::type> {
    static constexpr bool has_value_type = true;
    static constexpr bool has_recursive_value_type = true;
};

template <typename Container>
struct container_value_type_traits<
    Container,
    typename std::enable_if<
        negation<std::is_same<typename Container::value_type, Container>>::value>::type> {
    static constexpr bool has_value_type = true;
    static constexpr bool has_recursive_value_type = false;
};

/*
 * Tag to be used for representing the bottom of recursively defined types.
 * Define this tag so we don't have to use void.
 */
struct recursive_bottom {};

/*
 * Implementation detail of `recursive_container_traits` below.
 * `T` is the `value_type` of the container, which might need to be modified to
 * avoid recursive types and const types.
 */
template <typename T, bool is_this_a_map>
struct impl_type_to_check_recursively {
    /*
     * If the container is recursive, then no further recursion should be done.
     */
    using if_recursive = recursive_bottom;
    /*
     * Otherwise yield `T` unchanged.
     */
    using if_not_recursive = T;
};

/*
 * For pairs (which occur only as the value type of a map), the `const` should be
 * removed from the first type. Also, if the map is recursive, then the recursive
 * checking should consider the first type only.
 */
template <typename A, typename B>
struct impl_type_to_check_recursively<std::pair<A, B>, /* is_this_a_map = */ true> {
    using if_recursive = typename std::remove_const<A>::type;
    using if_not_recursive = std::pair<typename std::remove_const<A>::type, B>;
};

/*
 * Implementation of `recursive_container_traits` below.
 */
template <typename Container, typename SFINAE = void>
struct impl_recursive_container_traits {
    using type_to_check_recursively = recursive_bottom;
};

template <typename Container>
struct impl_recursive_container_traits<
    Container,
    typename std::enable_if<container_value_type_traits<Container>::has_value_type>::type> {
    static constexpr bool is_recursive
        = container_mapped_type_traits<Container>::has_recursive_mapped_type
          || container_value_type_traits<Container>::has_recursive_value_type;
    /*
     * This member dictates which type pybind11 should check recursively in traits
     * such as `is_move_constructible`, `is_copy_constructible`, `is_move_assignable`, ...
     * Direct access to `value_type` should be avoided:
     * 1. `value_type` might recursively contain the type again
     * 2. `value_type` of STL map types is `std::pair<A const, B>`, the `const`
     *    should be removed.
     */
    using type_to_check_recursively = typename std::conditional<
        is_recursive,
        typename impl_type_to_check_recursively<
            typename Container::value_type,
            container_mapped_type_traits<Container>::has_mapped_type>::if_recursive,
        typename impl_type_to_check_recursively<
            typename Container::value_type,
            container_mapped_type_traits<Container>::has_mapped_type>::if_not_recursive>::type;
};

/*
 * This trait defines the `type_to_check_recursively` which is needed to properly
 * handle recursively defined traits such as `is_move_constructible` without going
 * into an infinite recursion.
 * Should be used instead of directly accessing the `value_type`.
 * It cancels the recursion by returning the `recursive_bottom` tag.
 *
 * The default definition of `type_to_check_recursively` is as follows:
 *
 * 1. By default, it is `recursive_bottom`, so that the recursion is canceled.
 * 2. If the type is non-recursive and defines a `value_type`, then the `value_type` is used.
 *    If the `value_type` is a pair and a `mapped_type` is defined,
 *    then the `const` is removed from the first type.
 * 3. If the type is recursive and `value_type` is not a pair, then `recursive_bottom` is
 *    returned.
 * 4. If the type is recursive and `value_type` is a pair and a `mapped_type` is defined,
 *    then `const` is removed from the first type and the first type is returned.
 *
 * This behavior can be extended by the user as seen in test_stl_binders.cpp.
 *
 * This struct is exactly the same as impl_recursive_container_traits.
 * The duplication achieves that user-defined specializations don't compete
 * with internal specializations, but take precedence.
 */
template <typename Container, typename SFINAE = void>
struct recursive_container_traits : impl_recursive_container_traits<Container> {};

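// Example of the recursive case this machinery guards against (illustrative annotation,
// modeled on test_stl_binders.cpp):
//
//     struct RecursiveVector : std::vector<RecursiveVector> {};
//
// Here `value_type` is the container itself, so naively recursing into `value_type` in
// the traits below would never terminate; `type_to_check_recursively` yields
// `recursive_bottom` instead, which the specializations below treat as trivially true.
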
template <typename T>
struct is_move_constructible
    : all_of<std::is_move_constructible<T>,
             is_move_constructible<
                 typename recursive_container_traits<T>::type_to_check_recursively>> {};

template <>
struct is_move_constructible<recursive_bottom> : std::true_type {};

// Likewise for std::pair
// (after C++17 it is mandatory that the move constructor not exist when the two types aren't
// themselves move constructible, but this cannot be relied upon when T1 or T2 are themselves
// containers).
template <typename T1, typename T2>
struct is_move_constructible<std::pair<T1, T2>>
    : all_of<is_move_constructible<T1>, is_move_constructible<T2>> {};

// std::is_copy_constructible isn't quite enough: it lets std::vector<T> (and similar) through when
// T is non-copyable, but code containing such a copy constructor fails to actually compile.
template <typename T>
struct is_copy_constructible
    : all_of<std::is_copy_constructible<T>,
             is_copy_constructible<
                 typename recursive_container_traits<T>::type_to_check_recursively>> {};

template <>
struct is_copy_constructible<recursive_bottom> : std::true_type {};

// Likewise for std::pair
// (after C++17 it is mandatory that the copy constructor not exist when the two types aren't
// themselves copy constructible, but this cannot be relied upon when T1 or T2 are themselves
// containers).
template <typename T1, typename T2>
struct is_copy_constructible<std::pair<T1, T2>>
    : all_of<is_copy_constructible<T1>, is_copy_constructible<T2>> {};

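// Concrete false positive that the recursive check above corrects (illustrative
// annotation): with common standard libraries,
// std::is_copy_constructible<std::vector<std::unique_ptr<int>>>::value is true, yet
// actually invoking that copy constructor does not compile; recursing into the element
// type reports the vector as non-copyable, as intended.
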
// The same problems arise with std::is_copy_assignable, so we use the same workaround.
template <typename T>
struct is_copy_assignable
    : all_of<
          std::is_copy_assignable<T>,
          is_copy_assignable<typename recursive_container_traits<T>::type_to_check_recursively>> {
};

template <>
struct is_copy_assignable<recursive_bottom> : std::true_type {};

template <typename T1, typename T2>
struct is_copy_assignable<std::pair<T1, T2>>
    : all_of<is_copy_assignable<T1>, is_copy_assignable<T2>> {};

PYBIND11_NAMESPACE_END(detail)

// polymorphic_type_hook<itype>::get(src, tinfo) determines whether the object pointed
// to by `src` actually is an instance of some class derived from `itype`.
// If so, it sets `tinfo` to point to the std::type_info representing that derived
// type, and returns a pointer to the start of the most-derived object of that type
// (in which `src` is a subobject; this will be the same address as `src` in most
// single inheritance cases). If not, or if `src` is nullptr, it simply returns `src`
// and leaves `tinfo` at its default value of nullptr.
//
// The default polymorphic_type_hook just returns src. A specialization for polymorphic
// types determines the runtime type of the passed object and adjusts the this-pointer
// appropriately via dynamic_cast<void*>. This is what enables a C++ Animal* to appear
// to Python as a Dog (if Dog inherits from Animal, Animal is polymorphic, Dog is
// registered with pybind11, and this Animal is in fact a Dog).
//
// You may specialize polymorphic_type_hook yourself for types that want to appear
// polymorphic to Python but do not use C++ RTTI. (This is a not uncommon pattern
// in performance-sensitive applications, used most notably in LLVM.)
//
// polymorphic_type_hook_base allows users to specialize polymorphic_type_hook with
// std::enable_if. User provided specializations will always have higher priority than
// the default implementation and specialization provided in polymorphic_type_hook_base.
template <typename itype, typename SFINAE = void>
struct polymorphic_type_hook_base {
    static const void *get(const itype *src, const std::type_info *&) { return src; }
};
template <typename itype>
struct polymorphic_type_hook_base<itype, detail::enable_if_t<std::is_polymorphic<itype>::value>> {
    static const void *get(const itype *src, const std::type_info *&type) {
        type = src ? &typeid(*src) : nullptr;
        return dynamic_cast<const void *>(src);
    }
};
template <typename itype, typename SFINAE = void>
struct polymorphic_type_hook : public polymorphic_type_hook_base<itype> {};

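// Sketch of a user specialization for a hierarchy that carries its own type tag instead
// of C++ RTTI (illustrative annotation; `Animal`, `Dog`, and `kind` are hypothetical):
//
//     template <>
//     struct pybind11::polymorphic_type_hook<Animal> {
//         static const void *get(const Animal *src, const std::type_info *&type) {
//             if (src && src->kind == Animal::Kind::Dog) {
//                 type = &typeid(Dog);                  // report the derived type...
//                 return static_cast<const Dog *>(src); // ...and its object address
//             }
//             return src; // leave type as nullptr: treat as a plain Animal
//         }
//     };
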
PYBIND11_NAMESPACE_BEGIN(detail)

/// Generic type caster for objects stored on the heap
template <typename type>
class type_caster_base : public type_caster_generic {
    using itype = intrinsic_t<type>;

public:
    static constexpr auto name = const_name<type>();

    type_caster_base() : type_caster_base(typeid(type)) {}
    explicit type_caster_base(const std::type_info &info) : type_caster_generic(info) {}

    static handle cast(const itype &src, return_value_policy policy, handle parent) {
        if (policy == return_value_policy::automatic
            || policy == return_value_policy::automatic_reference) {
            policy = return_value_policy::copy;
        }
        return cast(&src, policy, parent);
    }

    static handle cast(itype &&src, return_value_policy, handle parent) {
        return cast(&src, return_value_policy::move, parent);
    }

    // Returns a (pointer, type_info) pair taking care of necessary type lookup for a
    // polymorphic type (using RTTI by default, but can be overridden by specializing
    // polymorphic_type_hook). If the instance isn't derived, returns the base version.
    static std::pair<const void *, const type_info *> src_and_type(const itype *src) {
        const auto &cast_type = typeid(itype);
        const std::type_info *instance_type = nullptr;
        const void *vsrc = polymorphic_type_hook<itype>::get(src, instance_type);
        if (instance_type && !same_type(cast_type, *instance_type)) {
            // This is a base pointer to a derived type. If the derived type is registered
            // with pybind11, we want to make the full derived object available.
            // In the typical case where itype is polymorphic, we get the correct
            // derived pointer (which may be != base pointer) by a dynamic_cast to
            // most derived type. If itype is not polymorphic, we won't get here
            // except via a user-provided specialization of polymorphic_type_hook,
            // and the user has promised that no this-pointer adjustment is
            // required in that case, so it's OK to use static_cast.
            if (const auto *tpi = get_type_info(*instance_type)) {
                return {vsrc, tpi};
            }
        }
        // Otherwise we have either a nullptr, an `itype` pointer, or an unknown derived pointer,
        // so don't do a cast
        return type_caster_generic::src_and_type(src, cast_type, instance_type);
    }

    static handle cast(const itype *src, return_value_policy policy, handle parent) {
        auto st = src_and_type(src);
        return type_caster_generic::cast(st.first,
                                         policy,
                                         parent,
                                         st.second,
                                         make_copy_constructor(src),
                                         make_move_constructor(src));
    }

    static handle cast_holder(const itype *src, const void *holder) {
        auto st = src_and_type(src);
        return type_caster_generic::cast(st.first,
                                         return_value_policy::take_ownership,
                                         {},
                                         st.second,
                                         nullptr,
                                         nullptr,
                                         holder);
    }

    template <typename T>
    using cast_op_type = detail::cast_op_type<T>;

    // NOLINTNEXTLINE(google-explicit-constructor)
    operator itype *() { return (type *) value; }
    // NOLINTNEXTLINE(google-explicit-constructor)
    operator itype &() {
        if (!value) {
            throw reference_cast_error();
        }
        return *((itype *) value);
    }

protected:
    using Constructor = void *(*) (const void *);

    /* Only enabled when the types are {copy,move}-constructible *and* when the type
       does not have a private operator new implementation. A comma operator is used in the
       decltype argument to apply SFINAE to the public copy/move constructors.*/
    template <typename T, typename = enable_if_t<is_copy_constructible<T>::value>>
    static auto make_copy_constructor(const T *)
        -> decltype(new T(std::declval<const T>()), Constructor{}) {
        return [](const void *arg) -> void * { return new T(*reinterpret_cast<const T *>(arg)); };
    }

    template <typename T, typename = enable_if_t<is_move_constructible<T>::value>>
    static auto make_move_constructor(const T *)
        -> decltype(new T(std::declval<T &&>()), Constructor{}) {
        return [](const void *arg) -> void * {
            return new T(std::move(*const_cast<T *>(reinterpret_cast<const T *>(arg))));
        };
    }

    static Constructor make_copy_constructor(...) { return nullptr; }
    static Constructor make_move_constructor(...) { return nullptr; }
};
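
// Note on the varargs fallbacks above: for a type rejected by is_copy_constructible,
// SFINAE removes the templated make_copy_constructor overload, the `...` overload is
// selected instead, and type_caster_generic::cast() receives a nullptr
// copy_constructor, which is what makes return_value_policy::copy throw its
// "non-copyable" cast_error at runtime.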

inline std::string quote_cpp_type_name(const std::string &cpp_type_name) {
    return cpp_type_name; // No-op for now. See PR #4888
}

PYBIND11_NOINLINE std::string type_info_description(const std::type_info &ti) {
    if (auto *type_data = get_type_info(ti)) {
        handle th((PyObject *) type_data->type);
        return th.attr("__module__").cast<std::string>() + '.'
               + th.attr("__qualname__").cast<std::string>();
    }
    return quote_cpp_type_name(clean_type_id(ti.name()));
}

PYBIND11_NAMESPACE_END(detail)
PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)